Lines Matching +full:wp +full:- +full:content (mm/hugetlb.c)
1 // SPDX-License-Identifier: GPL-2.0-only
100 if (spool->count) in subpool_is_free()
102 if (spool->max_hpages != -1) in subpool_is_free()
103 return spool->used_hpages == 0; in subpool_is_free()
104 if (spool->min_hpages != -1) in subpool_is_free()
105 return spool->rsv_hpages == spool->min_hpages; in subpool_is_free()
113 spin_unlock_irqrestore(&spool->lock, irq_flags); in unlock_or_release_subpool()
119 if (spool->min_hpages != -1) in unlock_or_release_subpool()
120 hugetlb_acct_memory(spool->hstate, in unlock_or_release_subpool()
121 -spool->min_hpages); in unlock_or_release_subpool()
135 spin_lock_init(&spool->lock); in hugepage_new_subpool()
136 spool->count = 1; in hugepage_new_subpool()
137 spool->max_hpages = max_hpages; in hugepage_new_subpool()
138 spool->hstate = h; in hugepage_new_subpool()
139 spool->min_hpages = min_hpages; in hugepage_new_subpool()
141 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { in hugepage_new_subpool()
145 spool->rsv_hpages = min_hpages; in hugepage_new_subpool()
154 spin_lock_irqsave(&spool->lock, flags); in hugepage_put_subpool()
155 BUG_ON(!spool->count); in hugepage_put_subpool()
156 spool->count--; in hugepage_put_subpool()
162 * Return -ENOMEM if there are not enough resources to satisfy the
176 spin_lock_irq(&spool->lock); in hugepage_subpool_get_pages()
178 if (spool->max_hpages != -1) { /* maximum size accounting */ in hugepage_subpool_get_pages()
179 if ((spool->used_hpages + delta) <= spool->max_hpages) in hugepage_subpool_get_pages()
180 spool->used_hpages += delta; in hugepage_subpool_get_pages()
182 ret = -ENOMEM; in hugepage_subpool_get_pages()
188 if (spool->min_hpages != -1 && spool->rsv_hpages) { in hugepage_subpool_get_pages()
189 if (delta > spool->rsv_hpages) { in hugepage_subpool_get_pages()
194 ret = delta - spool->rsv_hpages; in hugepage_subpool_get_pages()
195 spool->rsv_hpages = 0; in hugepage_subpool_get_pages()
198 spool->rsv_hpages -= delta; in hugepage_subpool_get_pages()
203 spin_unlock_irq(&spool->lock); in hugepage_subpool_get_pages()
222 spin_lock_irqsave(&spool->lock, flags); in hugepage_subpool_put_pages()
224 if (spool->max_hpages != -1) /* maximum size accounting */ in hugepage_subpool_put_pages()
225 spool->used_hpages -= delta; in hugepage_subpool_put_pages()
228 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) { in hugepage_subpool_put_pages()
229 if (spool->rsv_hpages + delta <= spool->min_hpages) in hugepage_subpool_put_pages()
232 ret = spool->rsv_hpages + delta - spool->min_hpages; in hugepage_subpool_put_pages()
234 spool->rsv_hpages += delta; in hugepage_subpool_put_pages()
235 if (spool->rsv_hpages > spool->min_hpages) in hugepage_subpool_put_pages()
236 spool->rsv_hpages = spool->min_hpages; in hugepage_subpool_put_pages()
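The matched lines above are the subpool accounting helpers: the get path returns how many pages must still be charged to the global pool (or -ENOMEM when the subpool maximum would be exceeded), and the put path returns how many pages may be released back. Below is a minimal userspace model of that arithmetic; the simplified struct, the helper names, and the absence of locking are assumptions made for illustration, not the kernel's definitions.

/* Toy model of hugetlb subpool accounting (no locking, simplified fields). */
#include <assert.h>
#include <errno.h>

struct subpool {
	long max_hpages;	/* -1 means "no maximum" */
	long used_hpages;
	long min_hpages;	/* -1 means "no minimum" */
	long rsv_hpages;	/* pages reserved globally on the subpool's behalf */
};

/* Pages the caller must still charge globally, or -ENOMEM. */
static long subpool_get_pages(struct subpool *sp, long delta)
{
	long ret = delta;

	if (sp->max_hpages != -1) {
		if (sp->used_hpages + delta > sp->max_hpages)
			return -ENOMEM;
		sp->used_hpages += delta;
	}
	if (sp->min_hpages != -1 && sp->rsv_hpages) {
		if (delta > sp->rsv_hpages) {
			ret = delta - sp->rsv_hpages;	/* part already reserved */
			sp->rsv_hpages = 0;
		} else {
			ret = 0;			/* fully covered by the reserve */
			sp->rsv_hpages -= delta;
		}
	}
	return ret;
}

/* Pages the caller may release back to the global pool. */
static long subpool_put_pages(struct subpool *sp, long delta)
{
	long ret = delta;

	if (sp->max_hpages != -1)
		sp->used_hpages -= delta;
	if (sp->min_hpages != -1 && sp->used_hpages < sp->min_hpages) {
		if (sp->rsv_hpages + delta <= sp->min_hpages)
			ret = 0;
		else
			ret = sp->rsv_hpages + delta - sp->min_hpages;
		sp->rsv_hpages += delta;
		if (sp->rsv_hpages > sp->min_hpages)
			sp->rsv_hpages = sp->min_hpages;
	}
	return ret;
}

int main(void)
{
	struct subpool sp = { .max_hpages = -1, .used_hpages = 0,
			      .min_hpages = 4, .rsv_hpages = 4 };

	assert(subpool_get_pages(&sp, 2) == 0);	/* covered by the reserve */
	assert(subpool_get_pages(&sp, 4) == 2);	/* 2 must come from the global pool */
	assert(subpool_put_pages(&sp, 6) == 2);	/* reserve refilled back to 4 */
	return 0;
}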
250 return HUGETLBFS_SB(inode->i_sb)->spool; in subpool_inode()
255 return subpool_inode(file_inode(vma->vm_file)); in subpool_vma()
266 VM_BUG_ON(resv->region_cache_count <= 0); in get_file_region_entry_from_cache()
268 resv->region_cache_count--; in get_file_region_entry_from_cache()
269 nrg = list_first_entry(&resv->region_cache, struct file_region, link); in get_file_region_entry_from_cache()
270 list_del(&nrg->link); in get_file_region_entry_from_cache()
272 nrg->from = from; in get_file_region_entry_from_cache()
273 nrg->to = to; in get_file_region_entry_from_cache()
282 nrg->reservation_counter = rg->reservation_counter; in copy_hugetlb_cgroup_uncharge_info()
283 nrg->css = rg->css; in copy_hugetlb_cgroup_uncharge_info()
284 if (rg->css) in copy_hugetlb_cgroup_uncharge_info()
285 css_get(rg->css); in copy_hugetlb_cgroup_uncharge_info()
297 nrg->reservation_counter = in record_hugetlb_cgroup_uncharge_info()
298 &h_cg->rsvd_hugepage[hstate_index(h)]; in record_hugetlb_cgroup_uncharge_info()
299 nrg->css = &h_cg->css; in record_hugetlb_cgroup_uncharge_info()
301 * The caller will hold exactly one h_cg->css reference for the in record_hugetlb_cgroup_uncharge_info()
306 * exactly one h_cg->css reference, we should do css_get for in record_hugetlb_cgroup_uncharge_info()
310 css_get(&h_cg->css); in record_hugetlb_cgroup_uncharge_info()
311 if (!resv->pages_per_hpage) in record_hugetlb_cgroup_uncharge_info()
312 resv->pages_per_hpage = pages_per_huge_page(h); in record_hugetlb_cgroup_uncharge_info()
316 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); in record_hugetlb_cgroup_uncharge_info()
318 nrg->reservation_counter = NULL; in record_hugetlb_cgroup_uncharge_info()
319 nrg->css = NULL; in record_hugetlb_cgroup_uncharge_info()
327 if (rg->css) in put_uncharge_info()
328 css_put(rg->css); in put_uncharge_info()
336 return rg->reservation_counter == org->reservation_counter && in has_same_uncharge_info()
337 rg->css == org->css; in has_same_uncharge_info()
349 if (&prg->link != &resv->regions && prg->to == rg->from && in coalesce_file_region()
351 prg->to = rg->to; in coalesce_file_region()
353 list_del(&rg->link); in coalesce_file_region()
361 if (&nrg->link != &resv->regions && nrg->from == rg->to && in coalesce_file_region()
363 nrg->from = rg->from; in coalesce_file_region()
365 list_del(&rg->link); in coalesce_file_region()
381 list_add(&nrg->link, rg); in hugetlb_resv_map_add()
386 return to - from; in hugetlb_resv_map_add()
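coalesce_file_region() merges a region with a neighbour whose range ends exactly where the other begins; the kernel additionally requires both entries to charge the same cgroup (has_same_uncharge_info()), which is omitted here. A rough sketch over a sorted array instead of the kernel's linked list; the names and layout are simplifications.

/* Merge map[i] with touching neighbours in a sorted array of [from, to) ranges. */
#include <stdio.h>

struct region { long from, to; };

static int coalesce(struct region *rg, int n, int i)
{
	if (i + 1 < n && rg[i].to == rg[i + 1].from) {	/* merge with the next one */
		rg[i].to = rg[i + 1].to;
		for (int j = i + 1; j + 1 < n; j++)
			rg[j] = rg[j + 1];
		n--;
	}
	if (i > 0 && rg[i - 1].to == rg[i].from) {	/* merge into the previous one */
		rg[i - 1].to = rg[i].to;
		for (int j = i; j + 1 < n; j++)
			rg[j] = rg[j + 1];
		n--;
	}
	return n;					/* new number of regions */
}

int main(void)
{
	struct region map[] = { { 0, 4 }, { 4, 6 }, { 6, 10 } };
	int n = coalesce(map, 3, 1);

	printf("%d region: [%ld,%ld)\n", n, map[0].from, map[0].to);	/* 1 region: [0,10) */
	return 0;
}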
390 * Must be called with resv->lock held.
402 struct list_head *head = &resv->regions; in add_reservation_in_range()
411 * [last_accounted_offset, iter->from), at every iteration, with some in add_reservation_in_range()
416 if (iter->from < f) { in add_reservation_in_range()
420 if (iter->to > last_accounted_offset) in add_reservation_in_range()
421 last_accounted_offset = iter->to; in add_reservation_in_range()
428 if (iter->from >= t) { in add_reservation_in_range()
429 rg = iter->link.prev; in add_reservation_in_range()
433 /* Add an entry for last_accounted_offset -> iter->from, and in add_reservation_in_range()
436 if (iter->from > last_accounted_offset) in add_reservation_in_range()
437 add += hugetlb_resv_map_add(resv, iter->link.prev, in add_reservation_in_range()
439 iter->from, h, h_cg, in add_reservation_in_range()
442 last_accounted_offset = iter->to; in add_reservation_in_range()
449 rg = head->prev; in add_reservation_in_range()
457 /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
461 __must_hold(&resv->lock) in allocate_file_region_entries()
478 while (resv->region_cache_count < in allocate_file_region_entries()
479 (resv->adds_in_progress + regions_needed)) { in allocate_file_region_entries()
480 to_allocate = resv->adds_in_progress + regions_needed - in allocate_file_region_entries()
481 resv->region_cache_count; in allocate_file_region_entries()
487 VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress); in allocate_file_region_entries()
489 spin_unlock(&resv->lock); in allocate_file_region_entries()
494 list_add(&trg->link, &allocated_regions); in allocate_file_region_entries()
497 spin_lock(&resv->lock); in allocate_file_region_entries()
499 list_splice(&allocated_regions, &resv->region_cache); in allocate_file_region_entries()
500 resv->region_cache_count += to_allocate; in allocate_file_region_entries()
507 list_del(&rg->link); in allocate_file_region_entries()
510 return -ENOMEM; in allocate_file_region_entries()
525 * this operation and we were not able to allocate, it returns -ENOMEM.
536 spin_lock(&resv->lock); in region_add()
553 resv->region_cache_count < in region_add()
554 resv->adds_in_progress + in region_add()
555 (actual_regions_needed - in_regions_needed)) { in region_add()
559 VM_BUG_ON(t - f <= 1); in region_add()
562 resv, actual_regions_needed - in_regions_needed)) { in region_add()
563 return -ENOMEM; in region_add()
571 resv->adds_in_progress -= in_regions_needed; in region_add()
573 spin_unlock(&resv->lock); in region_add()
589 * resv->adds_in_progress. This value needs to be provided to a follow up call
594 * zero. -ENOMEM is returned if a new file_region structure or cache entry
602 spin_lock(&resv->lock); in region_chg()
612 return -ENOMEM; in region_chg()
614 resv->adds_in_progress += *out_regions_needed; in region_chg()
616 spin_unlock(&resv->lock); in region_chg()
636 spin_lock(&resv->lock); in region_abort()
637 VM_BUG_ON(!resv->region_cache_count); in region_abort()
638 resv->adds_in_progress -= regions_needed; in region_abort()
639 spin_unlock(&resv->lock); in region_abort()
651 * be allocated. If the allocation fails, -ENOMEM will be returned.
653 * a region and possibly return -ENOMEM. Callers specifying
654 * t == LONG_MAX do not need to check for -ENOMEM error.
658 struct list_head *head = &resv->regions; in region_del()
664 spin_lock(&resv->lock); in region_del()
673 if (rg->to <= f && (rg->to != rg->from || rg->to != f)) in region_del()
676 if (rg->from >= t) in region_del()
679 if (f > rg->from && t < rg->to) { /* Must split region */ in region_del()
685 resv->region_cache_count > resv->adds_in_progress) { in region_del()
686 nrg = list_first_entry(&resv->region_cache, in region_del()
689 list_del(&nrg->link); in region_del()
690 resv->region_cache_count--; in region_del()
694 spin_unlock(&resv->lock); in region_del()
697 return -ENOMEM; in region_del()
701 del += t - f; in region_del()
703 resv, rg, t - f, false); in region_del()
706 nrg->from = t; in region_del()
707 nrg->to = rg->to; in region_del()
711 INIT_LIST_HEAD(&nrg->link); in region_del()
714 rg->to = f; in region_del()
716 list_add(&nrg->link, &rg->link); in region_del()
721 if (f <= rg->from && t >= rg->to) { /* Remove entire region */ in region_del()
722 del += rg->to - rg->from; in region_del()
724 rg->to - rg->from, true); in region_del()
725 list_del(&rg->link); in region_del()
730 if (f <= rg->from) { /* Trim beginning of region */ in region_del()
732 t - rg->from, false); in region_del()
734 del += t - rg->from; in region_del()
735 rg->from = t; in region_del()
738 rg->to - f, false); in region_del()
740 del += rg->to - f; in region_del()
741 rg->to = f; in region_del()
745 spin_unlock(&resv->lock); in region_del()
785 struct list_head *head = &resv->regions; in region_count()
789 spin_lock(&resv->lock); in region_count()
795 if (rg->to <= f) in region_count()
797 if (rg->from >= t) in region_count()
800 seg_from = max(rg->from, f); in region_count()
801 seg_to = min(rg->to, t); in region_count()
803 chg += seg_to - seg_from; in region_count()
805 spin_unlock(&resv->lock); in region_count()
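region_count() sums how much of [f, t) is already covered by the reservation map, clamping each overlapping region with max()/min() exactly as the matched lines show. A small standalone sketch, assuming a sorted array in place of the kernel's list:

/* Count pages of [f, t) covered by a sorted set of [from, to) regions. */
#include <stdio.h>

struct file_region { long from, to; };

static long region_count(const struct file_region *rg, int n, long f, long t)
{
	long chg = 0;

	for (int i = 0; i < n; i++) {
		if (rg[i].to <= f)	/* ends before the requested range */
			continue;
		if (rg[i].from >= t)	/* sorted, so nothing later can overlap */
			break;

		long seg_from = rg[i].from > f ? rg[i].from : f;
		long seg_to = rg[i].to < t ? rg[i].to : t;

		chg += seg_to - seg_from;
	}
	return chg;
}

int main(void)
{
	struct file_region map[] = { { 0, 4 }, { 6, 10 } };

	/* [2, 8) overlaps [2, 4) and [6, 8): 2 + 2 = 4 pages */
	printf("%ld\n", region_count(map, 2, 2, 8));
	return 0;
}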
817 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
818 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
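The two shifts above turn a user address into the index of the huge page it falls in within the file. A worked example with invented numbers, assuming a 2 MiB huge page (shift 21, order 9) on a 4 KiB base-page system:

#include <stdio.h>

int main(void)
{
	unsigned long vm_start = 0x40000000UL;	/* start of the mapping */
	unsigned long vm_pgoff = 512;		/* file offset, in 4 KiB base pages */
	unsigned long address = 0x40400000UL;	/* faulting address */
	unsigned int huge_page_shift = 21, huge_page_order = 9;

	unsigned long idx = ((address - vm_start) >> huge_page_shift) +
			    (vm_pgoff >> huge_page_order);

	/* (4 MiB / 2 MiB) + (512 >> 9) = 2 + 1 = 3 */
	printf("huge page index %lu\n", idx);
	return 0;
}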
834 if (vma->vm_ops && vma->vm_ops->pagesize) in vma_kernel_pagesize()
835 return vma->vm_ops->pagesize(vma); in vma_kernel_pagesize()
843 * architectures where it differs, an architecture-specific 'strong'
881 return (unsigned long)vma->vm_private_data; in get_vma_private_data()
887 vma->vm_private_data = (void *)value; in set_vma_private_data()
897 resv_map->reservation_counter = NULL; in resv_map_set_hugetlb_cgroup_uncharge_info()
898 resv_map->pages_per_hpage = 0; in resv_map_set_hugetlb_cgroup_uncharge_info()
899 resv_map->css = NULL; in resv_map_set_hugetlb_cgroup_uncharge_info()
901 resv_map->reservation_counter = in resv_map_set_hugetlb_cgroup_uncharge_info()
902 &h_cg->rsvd_hugepage[hstate_index(h)]; in resv_map_set_hugetlb_cgroup_uncharge_info()
903 resv_map->pages_per_hpage = pages_per_huge_page(h); in resv_map_set_hugetlb_cgroup_uncharge_info()
904 resv_map->css = &h_cg->css; in resv_map_set_hugetlb_cgroup_uncharge_info()
920 kref_init(&resv_map->refs); in resv_map_alloc()
921 spin_lock_init(&resv_map->lock); in resv_map_alloc()
922 INIT_LIST_HEAD(&resv_map->regions); in resv_map_alloc()
924 resv_map->adds_in_progress = 0; in resv_map_alloc()
928 * re-initialized to the proper values, to indicate that hugetlb cgroup in resv_map_alloc()
929 * reservations are to be un-charged from here. in resv_map_alloc()
933 INIT_LIST_HEAD(&resv_map->region_cache); in resv_map_alloc()
934 list_add(&rg->link, &resv_map->region_cache); in resv_map_alloc()
935 resv_map->region_cache_count = 1; in resv_map_alloc()
943 struct list_head *head = &resv_map->region_cache; in resv_map_release()
951 list_del(&rg->link); in resv_map_release()
955 VM_BUG_ON(resv_map->adds_in_progress); in resv_map_release()
967 * The VERY common case is inode->mapping == &inode->i_data but, in inode_resv_map()
970 return (struct resv_map *)(&inode->i_data)->private_data; in inode_resv_map()
976 if (vma->vm_flags & VM_MAYSHARE) { in vma_resv_map()
977 struct address_space *mapping = vma->vm_file->f_mapping; in vma_resv_map()
978 struct inode *inode = mapping->host; in vma_resv_map()
991 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_map()
1000 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_flags()
1017 * - For shared mappings this is a per-vma semaphore that may be in hugetlb_dup_vma_private()
1023 * - For MAP_PRIVATE mappings, this is the reserve map which does in hugetlb_dup_vma_private()
1025 * not guaranteed to succeed, even if read-only. in hugetlb_dup_vma_private()
1027 if (vma->vm_flags & VM_MAYSHARE) { in hugetlb_dup_vma_private()
1028 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_dup_vma_private()
1030 if (vma_lock && vma_lock->vma != vma) in hugetlb_dup_vma_private()
1031 vma->vm_private_data = NULL; in hugetlb_dup_vma_private()
1033 vma->vm_private_data = NULL; in hugetlb_dup_vma_private()
1038 * Called with mm->mmap_lock writer semaphore held.
1061 kref_put(&reservations->refs, resv_map_release); in clear_vma_resv_huge_pages()
1070 if (vma->vm_flags & VM_NORESERVE) { in vma_has_reserves()
1078 * properly, so add work-around here. in vma_has_reserves()
1080 if (vma->vm_flags & VM_MAYSHARE && chg == 0) in vma_has_reserves()
1087 if (vma->vm_flags & VM_MAYSHARE) { in vma_has_reserves()
1111 * Very Subtle - The value of chg comes from a previous in vma_has_reserves()
1137 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
1138 h->free_huge_pages++; in enqueue_huge_page()
1139 h->free_huge_pages_node[nid]++; in enqueue_huge_page()
1146 bool pin = !!(current->flags & PF_MEMALLOC_PIN); in dequeue_huge_page_node_exact()
1149 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) { in dequeue_huge_page_node_exact()
1156 list_move(&page->lru, &h->hugepage_activelist); in dequeue_huge_page_node_exact()
1159 h->free_huge_pages--; in dequeue_huge_page_node_exact()
1160 h->free_huge_pages_node[nid]--; in dequeue_huge_page_node_exact()
1205 return h->free_huge_pages - h->resv_huge_pages; in available_huge_pages()
1246 h->resv_huge_pages--; in dequeue_huge_page_vma()
1259 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
1291 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); in hstate_next_node_to_alloc()
1292 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_alloc()
1298 * helper for remove_pool_huge_page() - return the previously saved
1309 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); in hstate_next_node_to_free()
1310 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_free()
1319 nr_nodes--)
1325 nr_nodes--)
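hstate_next_node_to_alloc() and hstate_next_node_to_free() rotate a saved cursor through the allowed nodes so allocations and frees are spread round-robin rather than hammering one node. A minimal sketch of that rotation; the plain bool array stands in for a nodemask_t and is purely illustrative.

/* Round-robin selection of the next allowed node. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 4

static int next_node_round_robin(int *cursor, const bool allowed[MAX_NODES])
{
	for (int i = 0; i < MAX_NODES; i++) {
		int nid = (*cursor + i) % MAX_NODES;

		if (allowed[nid]) {
			*cursor = (nid + 1) % MAX_NODES;	/* advance past it */
			return nid;
		}
	}
	return -1;	/* no allowed node */
}

int main(void)
{
	bool allowed[MAX_NODES] = { true, false, true, true };
	int cursor = 0;

	for (int i = 0; i < 5; i++)
		printf("%d ", next_node_round_robin(&cursor, allowed));
	printf("\n");	/* prints: 0 2 3 0 2 */
	return 0;
}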
1327 /* used to demote non-gigantic huge pages as well */
1340 p->mapping = NULL; in __destroy_compound_gigantic_page()
1457 list_del(&page->lru); in __remove_hugetlb_page()
1460 h->free_huge_pages--; in __remove_hugetlb_page()
1461 h->free_huge_pages_node[nid]--; in __remove_hugetlb_page()
1464 h->surplus_huge_pages--; in __remove_hugetlb_page()
1465 h->surplus_huge_pages_node[nid]--; in __remove_hugetlb_page()
1471 * For non-gigantic pages set the destructor to the normal compound in __remove_hugetlb_page()
1495 h->nr_huge_pages--; in __remove_hugetlb_page()
1496 h->nr_huge_pages_node[nid]--; in __remove_hugetlb_page()
1521 INIT_LIST_HEAD(&page->lru); in add_hugetlb_page()
1522 h->nr_huge_pages++; in add_hugetlb_page()
1523 h->nr_huge_pages_node[nid]++; in add_hugetlb_page()
1526 h->surplus_huge_pages++; in add_hugetlb_page()
1527 h->surplus_huge_pages_node[nid]++; in add_hugetlb_page()
1593 subpage->flags &= ~(1 << PG_locked | 1 << PG_error | in __update_and_free_page()
1600 * Non-gigantic pages demoted from CMA allocated gigantic pages in __update_and_free_page()
1619 * freed and frees them one-by-one. As the page->mapping pointer is going
1637 node = node->next; in free_hpage_workfn()
1638 page->mapping = NULL; in free_hpage_workfn()
1675 if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist)) in update_and_free_page()
1718 page->mapping = NULL; in free_huge_page()
1748 h->resv_huge_pages++; in free_huge_page()
1754 } else if (h->surplus_huge_pages_node[nid]) { in free_huge_page()
1772 h->nr_huge_pages++; in __prep_account_new_huge_page()
1773 h->nr_huge_pages_node[nid]++; in __prep_account_new_huge_page()
1779 INIT_LIST_HEAD(&page->lru); in __prep_new_huge_page()
1810 * boot, it's safer to be consistent with the not-gigantic in __prep_compound_gigantic_page()
1850 atomic_set(compound_mapcount_ptr(page), -1); in __prep_compound_gigantic_page()
1941 compound_idx = page_to_pfn(page) - page_to_pfn(page_head); in hugetlb_basepage_index()
1943 compound_idx = page - page_head; in hugetlb_basepage_index()
2037 * Free pages and try again - ONCE! in alloc_fresh_huge_page()
2098 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && in remove_pool_huge_page()
2099 !list_empty(&h->hugepage_freelists[node])) { in remove_pool_huge_page()
2100 page = list_entry(h->hugepage_freelists[node].next, in remove_pool_huge_page()
2112 * nothing for in-use hugepages and non-hugepages.
2115 * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
2119 * -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
2126 int rc = -EBUSY; in dissolve_free_huge_page()
2154 * Theoretically, we should return -EBUSY when we in dissolve_free_huge_page()
2165 h->max_huge_pages--; in dissolve_free_huge_page()
2182 h->max_huge_pages++; in dissolve_free_huge_page()
2238 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) in alloc_surplus_huge_page()
2254 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { in alloc_surplus_huge_page()
2261 h->surplus_huge_pages++; in alloc_surplus_huge_page()
2262 h->surplus_huge_pages_node[page_to_nid(page)]++; in alloc_surplus_huge_page()
2376 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; in gather_surplus_pages()
2378 h->resv_huge_pages += delta; in gather_surplus_pages()
2384 ret = -ENOMEM; in gather_surplus_pages()
2394 list_add(&page->lru, &surplus_list); in gather_surplus_pages()
2404 needed = (h->resv_huge_pages + delta) - in gather_surplus_pages()
2405 (h->free_huge_pages + allocated); in gather_surplus_pages()
2425 h->resv_huge_pages += delta; in gather_surplus_pages()
2430 if ((--needed) < 0) in gather_surplus_pages()
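gather_surplus_pages() keeps re-deriving how many pages are still missing as (resv_huge_pages + delta) minus whatever is free plus whatever has been allocated so far, because the counters can change while the lock is dropped for allocation. A toy walk-through of that arithmetic with invented numbers:

#include <stdio.h>

int main(void)
{
	long resv = 10, free_pages = 8, delta = 4, allocated = 0;

	long needed = (resv + delta) - free_pages;		/* 6 pages short */
	allocated += 5;						/* say 5 arrive */
	needed = (resv + delta) - (free_pages + allocated);	/* recheck: 1 still short */
	printf("still needed: %ld\n", needed);
	return 0;
}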
2466 h->resv_huge_pages -= unused_resv_pages; in return_unused_surplus_pages()
2473 * by pre-allocated pages. Only free surplus pages. in return_unused_surplus_pages()
2475 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); in return_unused_surplus_pages()
2483 * on-line nodes with memory and will handle the hstate accounting. in return_unused_surplus_pages()
2485 while (nr_pages--) { in return_unused_surplus_pages()
2490 list_add(&page->lru, &page_list); in return_unused_surplus_pages()
2569 if (vma->vm_flags & VM_MAYSHARE) { in __vma_reservation_common()
2579 if (vma->vm_flags & VM_MAYSHARE) { in __vma_reservation_common()
2592 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV) in __vma_reservation_common()
2601 * Subtle - The reserve map for private mappings has the in __vma_reservation_common()
2721 if (!(vma->vm_flags & VM_MAYSHARE)) in restore_reserve_on_error()
2740 * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one
2763 return -ENOMEM; in alloc_and_dissolve_huge_page()
2776 * Fail with -EBUSY if not possible. in alloc_and_dissolve_huge_page()
2830 int ret = -EBUSY; in isolate_or_dissolve_huge_page()
2849 * alloc_contig_range and them. Return -ENOMEM as this has the effect in isolate_or_dissolve_huge_page()
2853 return -ENOMEM; in isolate_or_dissolve_huge_page()
2883 return ERR_PTR(-ENOMEM); in alloc_huge_page()
2896 return ERR_PTR(-ENOSPC); in alloc_huge_page()
2940 h->resv_huge_pages--; in alloc_huge_page()
2942 list_add(&page->lru, &h->hugepage_activelist); in alloc_huge_page()
2973 hugetlb_acct_memory(h, -rsv_adjust); in alloc_huge_page()
2990 return ERR_PTR(-ENOSPC); in alloc_huge_page()
3025 INIT_LIST_HEAD(&m->list); in __alloc_bootmem_huge_page()
3026 list_add(&m->list, &huge_boot_pages); in __alloc_bootmem_huge_page()
3027 m->hstate = h; in __alloc_bootmem_huge_page()
3041 struct hstate *h = m->hstate; in gather_bootmem_prealloc()
3057 * other side-effects, like CommitLimit going negative. in gather_bootmem_prealloc()
3068 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { in hugetlb_hstate_alloc_pages_onenode()
3084 if (i == h->max_huge_pages_node[nid]) in hugetlb_hstate_alloc_pages_onenode()
3089 h->max_huge_pages_node[nid], buf, nid, i); in hugetlb_hstate_alloc_pages_onenode()
3090 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); in hugetlb_hstate_alloc_pages_onenode()
3091 h->max_huge_pages_node[nid] = i; in hugetlb_hstate_alloc_pages_onenode()
3108 if (h->max_huge_pages_node[i] > 0) { in hugetlb_hstate_alloc_pages()
3120 * Bit mask controlling how hard we retry per-node allocations. in hugetlb_hstate_alloc_pages()
3132 /* bit mask controlling how hard we retry per-node allocations */ in hugetlb_hstate_alloc_pages()
3136 for (i = 0; i < h->max_huge_pages; ++i) { in hugetlb_hstate_alloc_pages()
3146 if (i < h->max_huge_pages) { in hugetlb_hstate_alloc_pages()
3151 h->max_huge_pages, buf, i); in hugetlb_hstate_alloc_pages()
3152 h->max_huge_pages = i; in hugetlb_hstate_alloc_pages()
3168 * h->demote_order is initially 0. in hugetlb_init_hstates()
3169 * - We can not demote gigantic pages if runtime freeing in hugetlb_init_hstates()
3171 * - If CMA allocation is possible, we can not demote in hugetlb_init_hstates()
3176 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) in hugetlb_init_hstates()
3181 if (h2->order < h->order && in hugetlb_init_hstates()
3182 h2->order > h->demote_order) in hugetlb_init_hstates()
3183 h->demote_order = h2->order; in hugetlb_init_hstates()
3196 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", in report_hugepages()
3197 buf, h->free_huge_pages); in report_hugepages()
3219 struct list_head *freel = &h->hugepage_freelists[i]; in try_to_free_low()
3221 if (count >= h->nr_huge_pages) in try_to_free_low()
3226 list_add(&page->lru, &page_list); in try_to_free_low()
3243 * Increment or decrement surplus_huge_pages. Keep node-specific counters
3244 * balanced by operating on them in a round-robin fashion.
3253 VM_BUG_ON(delta != -1 && delta != 1); in adjust_pool_surplus()
3257 if (h->surplus_huge_pages_node[node]) in adjust_pool_surplus()
3262 if (h->surplus_huge_pages_node[node] < in adjust_pool_surplus()
3263 h->nr_huge_pages_node[node]) in adjust_pool_surplus()
3270 h->surplus_huge_pages += delta; in adjust_pool_surplus()
3271 h->surplus_huge_pages_node[node] += delta; in adjust_pool_surplus()
3275 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
3285 * Bit mask controlling how hard we retry per-node allocations. in set_max_huge_pages()
3292 return -ENOMEM; in set_max_huge_pages()
3298 mutex_lock(&h->resize_lock); in set_max_huge_pages()
3311 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; in set_max_huge_pages()
3332 mutex_unlock(&h->resize_lock); in set_max_huge_pages()
3334 return -EINVAL; in set_max_huge_pages()
3350 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { in set_max_huge_pages()
3351 if (!adjust_pool_surplus(h, nodes_allowed, -1)) in set_max_huge_pages()
3372 /* Bail for signals. Probably ctrl-c from user */ in set_max_huge_pages()
3392 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; in set_max_huge_pages()
3404 list_add(&page->lru, &page_list); in set_max_huge_pages()
3417 h->max_huge_pages = persistent_huge_pages(h); in set_max_huge_pages()
3419 mutex_unlock(&h->resize_lock); in set_max_huge_pages()
3433 target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order); in demote_free_huge_page()
3458 * Note that we already hold h->resize_lock. To prevent deadlock, in demote_free_huge_page()
3461 mutex_lock(&target_hstate->resize_lock); in demote_free_huge_page()
3467 target_hstate->order); in demote_free_huge_page()
3469 prep_compound_page(subpage, target_hstate->order); in demote_free_huge_page()
3474 mutex_unlock(&target_hstate->resize_lock); in demote_free_huge_page()
3482 h->max_huge_pages--; in demote_free_huge_page()
3483 target_hstate->max_huge_pages += in demote_free_huge_page()
3498 if (!h->demote_order) { in demote_pool_huge_page()
3500 return -EINVAL; /* internal error */ in demote_pool_huge_page()
3504 list_for_each_entry(page, &h->hugepage_freelists[node], lru) { in demote_pool_huge_page()
3514 * Return -EBUSY so that caller will not retry. in demote_pool_huge_page()
3516 return -EBUSY; in demote_pool_huge_page()
3556 nr_huge_pages = h->nr_huge_pages; in nr_hugepages_show_common()
3558 nr_huge_pages = h->nr_huge_pages_node[nid]; in nr_hugepages_show_common()
3571 return -EINVAL; in __nr_hugepages_store_common()
3629 * hstate attribute for optionally mempolicy-based constraint on persistent
3652 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); in nr_overcommit_hugepages_show()
3663 return -EINVAL; in nr_overcommit_hugepages_store()
3670 h->nr_overcommit_huge_pages = input; in nr_overcommit_hugepages_store()
3686 free_huge_pages = h->free_huge_pages; in free_hugepages_show()
3688 free_huge_pages = h->free_huge_pages_node[nid]; in free_hugepages_show()
3698 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); in resv_hugepages_show()
3711 surplus_huge_pages = h->surplus_huge_pages; in surplus_hugepages_show()
3713 surplus_huge_pages = h->surplus_huge_pages_node[nid]; in surplus_hugepages_show()
3742 mutex_lock(&h->resize_lock); in demote_store()
3751 nr_available = h->free_huge_pages_node[nid]; in demote_store()
3753 nr_available = h->free_huge_pages; in demote_store()
3754 nr_available -= h->resv_huge_pages; in demote_store()
3762 nr_demote--; in demote_store()
3766 mutex_unlock(&h->resize_lock); in demote_store()
3778 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; in demote_size_show()
3795 return -EINVAL; in demote_size_store()
3796 demote_order = demote_hstate->order; in demote_size_store()
3798 return -EINVAL; in demote_size_store()
3802 if (demote_order >= h->order) in demote_size_store()
3803 return -EINVAL; in demote_size_store()
3806 mutex_lock(&h->resize_lock); in demote_size_store()
3807 h->demote_order = demote_order; in demote_size_store()
3808 mutex_unlock(&h->resize_lock); in demote_size_store()
3847 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); in hugetlb_sysfs_add_hstate()
3849 return -ENOMEM; in hugetlb_sysfs_add_hstate()
3858 if (h->demote_order) { in hugetlb_sysfs_add_hstate()
3862 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); in hugetlb_sysfs_add_hstate()
3877 * node_hstate/s - associate per node hstate attributes, via their kobjects,
3904 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
3905 * Returns node id via non-NULL nidp.
3915 if (nhs->hstate_kobjs[i] == kobj) { in kobj_to_node_hstate()
3928 * No-op if no hstate attributes attached.
3933 struct node_hstate *nhs = &node_hstates[node->dev.id]; in hugetlb_unregister_node()
3935 if (!nhs->hugepages_kobj) in hugetlb_unregister_node()
3940 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx]; in hugetlb_unregister_node()
3944 if (h->demote_order) in hugetlb_unregister_node()
3948 nhs->hstate_kobjs[idx] = NULL; in hugetlb_unregister_node()
3951 kobject_put(nhs->hugepages_kobj); in hugetlb_unregister_node()
3952 nhs->hugepages_kobj = NULL; in hugetlb_unregister_node()
3958 * No-op if attributes already registered.
3963 struct node_hstate *nhs = &node_hstates[node->dev.id]; in hugetlb_register_node()
3969 if (nhs->hugepages_kobj) in hugetlb_register_node()
3972 nhs->hugepages_kobj = kobject_create_and_add("hugepages", in hugetlb_register_node()
3973 &node->dev.kobj); in hugetlb_register_node()
3974 if (!nhs->hugepages_kobj) in hugetlb_register_node()
3978 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, in hugetlb_register_node()
3979 nhs->hstate_kobjs, in hugetlb_register_node()
3983 h->name, node->dev.id); in hugetlb_register_node()
3992 * devices of nodes that have memory. All on-line nodes should have
4008 *nidp = -1; in kobj_to_node_hstate()
4037 pr_err("HugeTLB: Unable to add hstate %s", h->name); in hugetlb_sysfs_init()
4055 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); in hugetlb_init()
4135 mutex_init(&h->resize_lock); in hugetlb_add_hstate()
4136 h->order = order; in hugetlb_add_hstate()
4137 h->mask = ~(huge_page_size(h) - 1); in hugetlb_add_hstate()
4139 INIT_LIST_HEAD(&h->hugepage_freelists[i]); in hugetlb_add_hstate()
4140 INIT_LIST_HEAD(&h->hugepage_activelist); in hugetlb_add_hstate()
4141 h->next_nid_to_alloc = first_memory_node; in hugetlb_add_hstate()
4142 h->next_nid_to_free = first_memory_node; in hugetlb_add_hstate()
4143 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", in hugetlb_add_hstate()
4161 parsed_hstate->max_huge_pages = 0; in hugepages_clear_pages_in_node()
4162 memset(parsed_hstate->max_huge_pages_node, 0, in hugepages_clear_pages_in_node()
4163 sizeof(parsed_hstate->max_huge_pages_node)); in hugepages_clear_pages_in_node()
4198 mhp = &parsed_hstate->max_huge_pages; in hugepages_setup()
4225 parsed_hstate->max_huge_pages_node[node] = tmp; in hugepages_setup()
4304 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); in hugepagesz_setup()
4332 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); in default_hugepagesz_setup()
4367 if (mpol->mode == MPOL_BIND && in policy_mbind_nodemask()
4369 cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) in policy_mbind_nodemask()
4370 return &mpol->nodes; in policy_mbind_nodemask()
4380 unsigned int *array = h->free_huge_pages_node; in allowed_mems_nr()
4414 unsigned long tmp = h->max_huge_pages; in hugetlb_sysctl_handler_common()
4418 return -EOPNOTSUPP; in hugetlb_sysctl_handler_common()
4457 return -EOPNOTSUPP; in hugetlb_overcommit_handler()
4459 tmp = h->nr_overcommit_huge_pages; in hugetlb_overcommit_handler()
4462 return -EINVAL; in hugetlb_overcommit_handler()
4471 h->nr_overcommit_huge_pages = tmp; in hugetlb_overcommit_handler()
4489 unsigned long count = h->nr_huge_pages; in hugetlb_report_meminfo()
4501 h->free_huge_pages, in hugetlb_report_meminfo()
4502 h->resv_huge_pages, in hugetlb_report_meminfo()
4503 h->surplus_huge_pages, in hugetlb_report_meminfo()
4521 nid, h->nr_huge_pages_node[nid], in hugetlb_report_node_meminfo()
4522 nid, h->free_huge_pages_node[nid], in hugetlb_report_node_meminfo()
4523 nid, h->surplus_huge_pages_node[nid]); in hugetlb_report_node_meminfo()
4536 h->nr_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4537 h->free_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4538 h->surplus_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4545 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10)); in hugetlb_report_usage()
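The shift by (PAGE_SHIFT - 10) converts a page count into kilobytes for the status output. A one-line check, assuming 4 KiB base pages (PAGE_SHIFT == 12):

#include <stdio.h>

int main(void)
{
	unsigned long pages = 300, page_shift = 12;

	printf("%lu kB\n", pages << (page_shift - 10));	/* 300 * 4 = 1200 kB */
	return 0;
}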
4555 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); in hugetlb_total_pages()
4561 int ret = -ENOMEM; in hugetlb_acct_memory()
4602 return_unused_surplus_pages(h, (unsigned long) -delta); in hugetlb_acct_memory()
4624 kref_get(&resv->refs); in hugetlb_vm_op_open()
4633 if (vma->vm_flags & VM_MAYSHARE) { in hugetlb_vm_op_open()
4634 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vm_op_open()
4637 if (vma_lock->vma != vma) { in hugetlb_vm_op_open()
4638 vma->vm_private_data = NULL; in hugetlb_vm_op_open()
4661 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
4662 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
4664 reserve = (end - start) - region_count(resv, start, end); in hugetlb_vm_op_close()
4672 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_vm_op_close()
4675 kref_put(&resv->refs, resv_map_release); in hugetlb_vm_op_close()
4681 return -EINVAL; in hugetlb_vm_op_split()
4692 * handle_mm_fault() to try to instantiate regular-sized pages in the
4725 vma->vm_page_prot))); in make_huge_pte()
4728 vma->vm_page_prot)); in make_huge_pte()
4731 entry = arch_make_huge_pte(entry, shift, vma->vm_flags); in make_huge_pte()
4778 set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1)); in hugetlb_install_page()
4779 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); in hugetlb_install_page()
4791 bool cow = is_cow_mapping(src_vma->vm_flags); in copy_hugetlb_page_range()
4801 src_vma->vm_start, in copy_hugetlb_page_range()
4802 src_vma->vm_end); in copy_hugetlb_page_range()
4805 raw_write_seqcount_begin(&src->write_protect_seq); in copy_hugetlb_page_range()
4817 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { in copy_hugetlb_page_range()
4826 ret = -ENOMEM; in copy_hugetlb_page_range()
4881 * uffd-wp enabled. in copy_hugetlb_page_range()
4896 * When pre-allocating the page or copying data, we in copy_hugetlb_page_range()
4958 raw_write_seqcount_end(&src->write_protect_seq); in copy_hugetlb_page_range()
4971 struct mm_struct *mm = vma->vm_mm; in move_huge_pte()
4999 struct address_space *mapping = vma->vm_file->f_mapping; in move_hugetlb_page_tables()
5001 struct mm_struct *mm = vma->vm_mm; in move_hugetlb_page_tables()
5049 flush_tlb_range(vma, old_end - len, old_end); in move_hugetlb_page_tables()
5054 return len + old_addr - old_end; in move_hugetlb_page_tables()
5061 struct mm_struct *mm = vma->vm_mm; in __unmap_hugepage_range()
5122 * If the pte was wr-protected by uffd-wp in any of the in __unmap_hugepage_range()
5124 * drop the uffd-wp bit in this zap, then replace the in __unmap_hugepage_range()
5162 /* Leave a uffd-wp pte marker if needed */ in __unmap_hugepage_range()
5205 i_mmap_lock_write(vma->vm_file->f_mapping); in __unmap_hugepage_range_final()
5220 i_mmap_unlock_write(vma->vm_file->f_mapping); in __unmap_hugepage_range_final()
5222 i_mmap_unlock_write(vma->vm_file->f_mapping); in __unmap_hugepage_range_final()
5233 tlb_gather_mmu(&tlb, vma->vm_mm); in unmap_hugepage_range()
5257 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + in unmap_ref_private()
5258 vma->vm_pgoff; in unmap_ref_private()
5259 mapping = vma->vm_file->f_mapping; in unmap_ref_private()
5267 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { in unmap_ref_private()
5277 if (iter_vma->vm_flags & VM_MAYSHARE) in unmap_ref_private()
5283 * areas. This is because a future no-page fault on this VMA in unmap_ref_private()
5317 * hugetlb does not support FOLL_FORCE-style write faults that keep the in hugetlb_wp()
5320 if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE))) in hugetlb_wp()
5324 if (vma->vm_flags & VM_MAYSHARE) { in hugetlb_wp()
5338 * If no-one else is actually using this page, we're the exclusive in hugetlb_wp()
5384 struct address_space *mapping = vma->vm_file->f_mapping; in hugetlb_wp()
5413 * race occurs while re-acquiring page table in hugetlb_wp()
5491 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_present()
5504 struct inode *inode = mapping->host; in hugetlb_add_to_page_cache()
5519 * by non-hugetlbfs specific code paths. in hugetlb_add_to_page_cache()
5523 spin_lock(&inode->i_lock); in hugetlb_add_to_page_cache()
5524 inode->i_blocks += blocks_per_huge_page(h); in hugetlb_add_to_page_cache()
5525 spin_unlock(&inode->i_lock); in hugetlb_add_to_page_cache()
5606 current->pid); in hugetlb_no_page()
5617 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
5624 * without pgtable lock, we need to re-test under in hugetlb_no_page()
5627 * either changed or during-changing ptes and retry in hugetlb_no_page()
5673 if (vma->vm_flags & VM_MAYSHARE) { in hugetlb_no_page()
5677 * err can't be -EEXIST which implies someone in hugetlb_no_page()
5729 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { in hugetlb_no_page()
5749 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) in hugetlb_no_page()
5750 && (vma->vm_flags & VM_SHARED))); in hugetlb_no_page()
5752 * If this pte was previously wr-protected, keep it wr-protected even in hugetlb_no_page()
5760 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { in hugetlb_no_page()
5803 return hash & (num_fault_mutexes - 1); in hugetlb_fault_mutex_hash()
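The fault mutex table has a power-of-two number of entries, so the hash of (mapping, index) can be reduced with a mask instead of a modulo. A standalone sketch; hash_mix() below is a toy stand-in rather than the kernel's hash_32()/hash_64() helpers.

#include <stdint.h>
#include <stdio.h>

#define NUM_FAULT_MUTEXES 256U	/* must be a power of two for the mask to work */

static uint32_t hash_mix(uint64_t mapping, uint64_t idx)
{
	uint64_t h = mapping ^ (idx * 0x9E3779B97F4A7C15ULL);

	h ^= h >> 33;
	h *= 0xFF51AFD7ED558CCDULL;
	h ^= h >> 33;
	return (uint32_t)h;
}

static uint32_t fault_mutex_hash(uint64_t mapping, uint64_t idx)
{
	return hash_mix(mapping, idx) & (NUM_FAULT_MUTEXES - 1);
}

int main(void)
{
	printf("mutex slot %u\n", fault_mutex_hash(0xffff888100000000ULL, 42));
	return 0;
}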
5835 * OK as we are only making decisions based on content and in hugetlb_fault()
5836 * not actually modifying content here. in hugetlb_fault()
5852 mapping = vma->vm_file->f_mapping; in hugetlb_fault()
5904 !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) { in hugetlb_fault()
5921 /* Handle userfault-wp first, before trying to lock more pages */ in hugetlb_fault()
6010 struct address_space *mapping = dst_vma->vm_file->f_mapping; in hugetlb_mcopy_atomic_pte()
6013 int vm_shared = dst_vma->vm_flags & VM_SHARED; in hugetlb_mcopy_atomic_pte()
6016 int ret = -ENOMEM; in hugetlb_mcopy_atomic_pte()
6022 ret = -EFAULT; in hugetlb_mcopy_atomic_pte()
6029 * a non-missing case. Return -EEXIST. in hugetlb_mcopy_atomic_pte()
6033 ret = -EEXIST; in hugetlb_mcopy_atomic_pte()
6039 ret = -ENOMEM; in hugetlb_mcopy_atomic_pte()
6049 ret = -ENOENT; in hugetlb_mcopy_atomic_pte()
6061 ret = -ENOMEM; in hugetlb_mcopy_atomic_pte()
6075 ret = -EEXIST; in hugetlb_mcopy_atomic_pte()
6083 ret = -ENOMEM; in hugetlb_mcopy_atomic_pte()
6102 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_mcopy_atomic_pte()
6103 ret = -EFAULT; in hugetlb_mcopy_atomic_pte()
6121 ret = -EIO; in hugetlb_mcopy_atomic_pte()
6126 * We allow to overwrite a pte marker: consider when both MISSING|WP in hugetlb_mcopy_atomic_pte()
6127 * registered, we firstly wr-protect a none pte which has no page cache in hugetlb_mcopy_atomic_pte()
6130 ret = -EEXIST; in hugetlb_mcopy_atomic_pte()
6142 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY in hugetlb_mcopy_atomic_pte()
6143 * with wp flag set, don't set pte write bit. in hugetlb_mcopy_atomic_pte()
6148 writable = dst_vma->vm_flags & VM_WRITE; in hugetlb_mcopy_atomic_pte()
6167 /* No need to invalidate - it was non-present before */ in hugetlb_mcopy_atomic_pte()
6232 int err = -EFAULT, refs; in follow_hugetlb_page()
6234 while (vaddr < vma->vm_end && remainder) { in follow_hugetlb_page()
6308 * FAULT_FLAG_TRIED can co-exist in follow_hugetlb_page()
6348 (vaddr + huge_page_size(h) < vma->vm_end) && in follow_hugetlb_page()
6351 remainder -= pages_per_huge_page(h); in follow_hugetlb_page()
6358 refs = min3(pages_per_huge_page(h) - pfn_offset, remainder, in follow_hugetlb_page()
6359 (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT); in follow_hugetlb_page()
6382 err = -ENOMEM; in follow_hugetlb_page()
6388 remainder -= refs; in follow_hugetlb_page()
6408 struct mm_struct *mm = vma->vm_mm; in hugetlb_change_protection()
6434 i_mmap_lock_write(vma->vm_file->f_mapping); in hugetlb_change_protection()
6446 * When uffd-wp is enabled on the vma, unshare in hugetlb_change_protection()
6488 * This is changing a non-present pte into a none pte, in hugetlb_change_protection()
6500 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); in hugetlb_change_protection()
6510 /* Safe to modify directly (none->non-present). */ in hugetlb_change_protection()
6533 i_mmap_unlock_write(vma->vm_file->f_mapping); in hugetlb_change_protection()
6537 return pages << h->order; in hugetlb_change_protection()
6546 long chg, add = -1; in hugetlb_reserve_pages()
6575 * to reserve the full area even if read-only as mprotect() may be in hugetlb_reserve_pages()
6576 * called to make the mapping read-write. Assume !vma is a shm mapping in hugetlb_reserve_pages()
6578 if (!vma || vma->vm_flags & VM_MAYSHARE) { in hugetlb_reserve_pages()
6593 chg = to - from; in hugetlb_reserve_pages()
6606 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) { in hugetlb_reserve_pages()
6634 * the reservation was consumed. Private mappings are per-VMA and in hugetlb_reserve_pages()
6640 if (!vma || vma->vm_flags & VM_MAYSHARE) { in hugetlb_reserve_pages()
6644 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_reserve_pages()
6658 * reference to h_cg->css. See comment below for detail. in hugetlb_reserve_pages()
6662 (chg - add) * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
6665 chg - add); in hugetlb_reserve_pages()
6666 hugetlb_acct_memory(h, -rsv_adjust); in hugetlb_reserve_pages()
6670 * h_cg->css. So we should release the reference held in hugetlb_reserve_pages()
6687 if (!vma || vma->vm_flags & VM_MAYSHARE) in hugetlb_reserve_pages()
6694 kref_put(&resv_map->refs, resv_map_release); in hugetlb_reserve_pages()
6722 spin_lock(&inode->i_lock); in hugetlb_unreserve_pages()
6723 inode->i_blocks -= (blocks_per_huge_page(h) * freed); in hugetlb_unreserve_pages()
6724 spin_unlock(&inode->i_lock); in hugetlb_unreserve_pages()
6730 * Note that !resv_map implies freed == 0. So (chg - freed) in hugetlb_unreserve_pages()
6733 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); in hugetlb_unreserve_pages()
6734 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_unreserve_pages()
6744 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + in page_table_shareable()
6745 svma->vm_start; in page_table_shareable()
6750 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; in page_table_shareable()
6751 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK; in page_table_shareable()
6762 !svma->vm_private_data) in page_table_shareable()
6780 if (!(vma->vm_flags & VM_MAYSHARE)) in want_pmd_share()
6782 if (!vma->vm_private_data) /* vma lock required for sharing */ in want_pmd_share()
6797 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE), in adjust_range_if_pmd_sharing_possible()
6798 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); in adjust_range_if_pmd_sharing_possible()
6804 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || in adjust_range_if_pmd_sharing_possible()
6818 return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) && in __vma_shareable_flags_pmd()
6819 vma->vm_private_data; in __vma_shareable_flags_pmd()
6825 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vma_lock_read()
6827 down_read(&vma_lock->rw_sema); in hugetlb_vma_lock_read()
6834 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vma_unlock_read()
6836 up_read(&vma_lock->rw_sema); in hugetlb_vma_unlock_read()
6843 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vma_lock_write()
6845 down_write(&vma_lock->rw_sema); in hugetlb_vma_lock_write()
6852 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vma_unlock_write()
6854 up_write(&vma_lock->rw_sema); in hugetlb_vma_unlock_write()
6860 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vma_trylock_write()
6865 return down_write_trylock(&vma_lock->rw_sema); in hugetlb_vma_trylock_write()
6871 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vma_assert_locked()
6873 lockdep_assert_held(&vma_lock->rw_sema); in hugetlb_vma_assert_locked()
6887 struct vm_area_struct *vma = vma_lock->vma; in __hugetlb_vma_unlock_write_put()
6892 * Semaphore synchronizes access to vma_lock->vma field. in __hugetlb_vma_unlock_write_put()
6894 vma_lock->vma = NULL; in __hugetlb_vma_unlock_write_put()
6895 vma->vm_private_data = NULL; in __hugetlb_vma_unlock_write_put()
6896 up_write(&vma_lock->rw_sema); in __hugetlb_vma_unlock_write_put()
6897 kref_put(&vma_lock->refs, hugetlb_vma_lock_release); in __hugetlb_vma_unlock_write_put()
6903 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in __hugetlb_vma_unlock_write_free()
6917 if (vma->vm_private_data) { in hugetlb_vma_lock_free()
6918 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vma_lock_free()
6920 down_write(&vma_lock->rw_sema); in hugetlb_vma_lock_free()
6930 if (!vma || !(vma->vm_flags & VM_MAYSHARE)) in hugetlb_vma_lock_alloc()
6933 /* Should never get here with non-NULL vm_private_data */ in hugetlb_vma_lock_alloc()
6934 if (vma->vm_private_data) in hugetlb_vma_lock_alloc()
6953 kref_init(&vma_lock->refs); in hugetlb_vma_lock_alloc()
6954 init_rwsem(&vma_lock->rw_sema); in hugetlb_vma_lock_alloc()
6955 vma_lock->vma = vma; in hugetlb_vma_lock_alloc()
6956 vma->vm_private_data = vma_lock; in hugetlb_vma_lock_alloc()
6964 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
6971 struct address_space *mapping = vma->vm_file->f_mapping; in huge_pmd_share()
6972 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + in huge_pmd_share()
6973 vma->vm_pgoff; in huge_pmd_share()
6981 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { in huge_pmd_share()
6987 spte = huge_pte_offset(svma->vm_mm, saddr, in huge_pmd_share()
7033 i_mmap_assert_write_locked(vma->vm_file->f_mapping); in huge_pmd_unshare()
7142 * huge_pte_offset() - Walk the page table to resolve the hugepage
7167 /* must be pud huge, non-present or none */ in huge_pte_offset()
7174 /* must be pmd huge, non-present or none */ in huge_pte_offset()
7180 * page in a page table page mapping size. Used to skip non-present
7190 return P4D_SIZE - PUD_SIZE; in hugetlb_mask_last_page()
7192 return PUD_SIZE - PMD_SIZE; in hugetlb_mask_last_page()
7204 return PUD_SIZE - PMD_SIZE; in hugetlb_mask_last_page()
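hugetlb_mask_last_page() returns a mask covering every huge-page slot below the next page-table level; OR-ing a not-present address with it jumps a walker to the last slot of the current higher-level entry, so the loop's next increment crosses into the next entry instead of probing every empty slot. A sketch assuming an x86-64-like 4-level layout:

#include <stdio.h>

#define PMD_SIZE (1UL << 21)	/* 2 MiB */
#define PUD_SIZE (1UL << 30)	/* 1 GiB */
#define P4D_SIZE (1UL << 39)	/* 512 GiB */

/* Mask of the huge-page-index bits below the next page-table level. */
static unsigned long mask_last_page(unsigned long huge_page_size)
{
	if (huge_page_size == PUD_SIZE)
		return P4D_SIZE - PUD_SIZE;
	if (huge_page_size == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
	return 0UL;	/* no skipping possible, walk one huge page at a time */
}

int main(void)
{
	unsigned long addr = 0x40200000UL;	/* 1 GiB + 2 MiB */
	unsigned long mask = mask_last_page(PMD_SIZE);

	addr |= mask;	/* last 2 MiB slot of this 1 GiB entry */
	printf("%#lx\n", addr);	/* 0x7fe00000 */
	return 0;
}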
7219 return ERR_PTR(-EINVAL); in follow_huge_addr()
7234 struct mm_struct *mm = vma->vm_mm; in follow_huge_pmd_pte()
7339 ret = -EBUSY; in isolate_hugetlb()
7343 list_move_tail(&page->lru, list); in isolate_hugetlb()
7362 ret = -EBUSY; in get_hwpoison_huge_page()
7382 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); in putback_active_hugepage()
7400 * Also note that we have to transfer the per-node surplus state in move_hugetlb_state()
7402 * the per-node's. in move_hugetlb_state()
7412 * There is no need to transfer the per-node surplus state in move_hugetlb_state()
7418 if (h->surplus_huge_pages_node[old_nid]) { in move_hugetlb_state()
7419 h->surplus_huge_pages_node[old_nid]--; in move_hugetlb_state()
7420 h->surplus_huge_pages_node[new_nid]++; in move_hugetlb_state()
7434 struct mm_struct *mm = vma->vm_mm; in hugetlb_unshare_all_pmds()
7440 if (!(vma->vm_flags & VM_MAYSHARE)) in hugetlb_unshare_all_pmds()
7443 start = ALIGN(vma->vm_start, PUD_SIZE); in hugetlb_unshare_all_pmds()
7444 end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); in hugetlb_unshare_all_pmds()
7458 i_mmap_lock_write(vma->vm_file->f_mapping); in hugetlb_unshare_all_pmds()
7468 i_mmap_unlock_write(vma->vm_file->f_mapping); in hugetlb_unshare_all_pmds()
7536 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; in hugetlb_cma_reserve()
7544 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; in hugetlb_cma_reserve()
7583 size = min(per_node, hugetlb_cma_size - reserved); in hugetlb_cma_reserve()