
Searched refs:nr_pages (Results 1 – 25 of 369) sorted by relevance


/Linux-v5.4/mm/
page_counter.c
55 void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages) in page_counter_cancel() argument
59 new = atomic_long_sub_return(nr_pages, &counter->usage); in page_counter_cancel()
72 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages) in page_counter_charge() argument
79 new = atomic_long_add_return(nr_pages, &c->usage); in page_counter_charge()
100 unsigned long nr_pages, in page_counter_try_charge() argument
121 new = atomic_long_add_return(nr_pages, &c->usage); in page_counter_try_charge()
123 atomic_long_sub(nr_pages, &c->usage); in page_counter_try_charge()
145 page_counter_cancel(c, nr_pages); in page_counter_try_charge()
155 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages) in page_counter_uncharge() argument
160 page_counter_cancel(c, nr_pages); in page_counter_uncharge()
[all …]
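The hits above show the core page_counter discipline: try_charge atomically adds nr_pages to the counter and every ancestor, rolling the whole chain back on a limit breach, while uncharge/cancel subtract. A minimal caller sketch, assuming the v5.4 signatures listed under include/linux/page_counter.h below (my_charge/my_uncharge are illustrative names, not from the tree):

    /* Sketch: charging and uncharging nr_pages against a page_counter. */
    #include <linux/page_counter.h>

    static int my_charge(struct page_counter *counter, unsigned long nr_pages)
    {
            struct page_counter *fail;

            /* Adds nr_pages up the hierarchy; on failure the partial
             * charge is rolled back and *fail names the limiting level. */
            if (!page_counter_try_charge(counter, nr_pages, &fail))
                    return -ENOMEM;
            return 0;
    }

    static void my_uncharge(struct page_counter *counter, unsigned long nr_pages)
    {
            page_counter_uncharge(counter, nr_pages);
    }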
percpu-km.c
49 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; in pcpu_create_chunk() local
59 pages = alloc_pages(gfp, order_base_2(nr_pages)); in pcpu_create_chunk()
65 for (i = 0; i < nr_pages; i++) in pcpu_create_chunk()
72 pcpu_chunk_populated(chunk, 0, nr_pages); in pcpu_create_chunk()
83 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; in pcpu_destroy_chunk() local
92 __free_pages(chunk->data, order_base_2(nr_pages)); in pcpu_destroy_chunk()
103 size_t nr_pages, alloc_pages; in pcpu_verify_alloc_info() local
111 nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT; in pcpu_verify_alloc_info()
112 alloc_pages = roundup_pow_of_two(nr_pages); in pcpu_verify_alloc_info()
114 if (alloc_pages > nr_pages) in pcpu_verify_alloc_info()
[all …]
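percpu-km backs each chunk with one high-order page block, so order_base_2() rounds the group size up to a power of two, and pcpu_verify_alloc_info() warns when that round-up wastes pages. A hedged sketch of the check (names as in the hits above):

    /* Sketch: roundup_pow_of_two() mirrors the order_base_2()
     * allocation, so any difference is memory the chunk cannot use. */
    alloc_pages = roundup_pow_of_two(nr_pages);
    if (alloc_pages > nr_pages)
            pr_warn("percpu-km: %zu pages wasted per chunk\n",
                    alloc_pages - nr_pages);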
memory_hotplug.c
227 unsigned long i, pfn, end_pfn, nr_pages; in register_page_bootmem_info_node() local
231 nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT; in register_page_bootmem_info_node()
234 for (i = 0; i < nr_pages; i++, page++) in register_page_bootmem_info_node()
254 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages, in check_pfn_span() argument
273 || !IS_ALIGNED(nr_pages, min_align)) { in check_pfn_span()
275 reason, pfn, pfn + nr_pages - 1); in check_pfn_span()
287 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, in __add_pages() argument
299 || vmem_altmap_offset(altmap) > nr_pages) { in __add_pages()
306 err = check_pfn_span(pfn, nr_pages, "add"); in __add_pages()
311 end_sec = pfn_to_section_nr(pfn + nr_pages - 1); in __add_pages()
[all …]
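check_pfn_span() enforces that hot-added ranges start and end on the minimum mapping granularity. A hedged sketch of its core logic (in v5.4 the alignment is a subsection with CONFIG_SPARSEMEM_VMEMMAP, a full section otherwise):

    /* Sketch: both the start pfn and the length must be aligned. */
    min_align = IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP) ?
                    PAGES_PER_SUBSECTION : PAGES_PER_SECTION;
    if (!IS_ALIGNED(pfn, min_align) || !IS_ALIGNED(nr_pages, min_align))
            return -EINVAL;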
sparse.c
223 unsigned long nr_pages) in subsection_mask_set() argument
226 int end = subsection_map_index(pfn + nr_pages - 1); in subsection_mask_set()
231 void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) in subsection_map_init() argument
233 int end_sec = pfn_to_section_nr(pfn + nr_pages - 1); in subsection_map_init()
236 if (!nr_pages) in subsection_map_init()
243 pfns = min(nr_pages, PAGES_PER_SECTION in subsection_map_init()
253 nr_pages -= pfns; in subsection_map_init()
452 unsigned long nr_pages, int nid, struct vmem_altmap *altmap) in __populate_section_memmap() argument
651 unsigned long nr_pages, int nid, struct vmem_altmap *altmap) in populate_section_memmap() argument
653 return __populate_section_memmap(pfn, nr_pages, nid, altmap); in populate_section_memmap()
[all …]
hugetlb_cgroup.c
128 unsigned int nr_pages; in hugetlb_cgroup_move_parent() local
142 nr_pages = compound_nr(page); in hugetlb_cgroup_move_parent()
146 page_counter_charge(&parent->hugepage[idx], nr_pages); in hugetlb_cgroup_move_parent()
150 page_counter_cancel(counter, nr_pages); in hugetlb_cgroup_move_parent()
181 int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, in hugetlb_cgroup_charge_cgroup() argument
205 if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages, &counter)) in hugetlb_cgroup_charge_cgroup()
214 void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, in hugetlb_cgroup_commit_charge() argument
228 void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, in hugetlb_cgroup_uncharge_page() argument
240 page_counter_uncharge(&h_cg->hugepage[idx], nr_pages); in hugetlb_cgroup_uncharge_page()
244 void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, in hugetlb_cgroup_uncharge_cgroup() argument
[all …]
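hugetlb cgroup accounting is two-phase: reserve against the cgroup first, then commit the charge to the page once allocation succeeds. A sketch of the calling pattern, assuming the v5.4 signatures declared in include/linux/hugetlb_cgroup.h below (idx selects the hstate; the values are illustrative):

    /* Sketch: reserve, allocate, commit; uncharge on free. */
    struct hugetlb_cgroup *h_cg;
    unsigned long nr_pages = 512;   /* pages in one 2 MiB huge page on x86 */

    if (hugetlb_cgroup_charge_cgroup(idx, nr_pages, &h_cg))
            return NULL;                      /* over the cgroup limit */
    /* ... allocate the huge page ... */
    hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page);
    /* ... and when the page is finally freed: */
    hugetlb_cgroup_uncharge_page(idx, nr_pages, page);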
page_ext.c
139 unsigned long nr_pages; in alloc_node_page_ext() local
141 nr_pages = NODE_DATA(nid)->node_spanned_pages; in alloc_node_page_ext()
142 if (!nr_pages) in alloc_node_page_ext()
152 nr_pages += MAX_ORDER_NR_PAGES; in alloc_node_page_ext()
154 table_size = page_ext_size * nr_pages; in alloc_node_page_ext()
287 unsigned long nr_pages, in online_page_ext() argument
294 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in online_page_ext()
322 unsigned long nr_pages, int nid) in offline_page_ext() argument
327 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in offline_page_ext()
344 mn->nr_pages, mn->status_change_nid); in page_ext_callback()
[all …]
gup.c
780 unsigned long start, unsigned long nr_pages, in __get_user_pages() argument
788 if (!nr_pages) in __get_user_pages()
827 &start, &nr_pages, i, in __get_user_pages()
883 if (page_increm > nr_pages) in __get_user_pages()
884 page_increm = nr_pages; in __get_user_pages()
887 nr_pages -= page_increm; in __get_user_pages()
888 } while (nr_pages); in __get_user_pages()
1001 unsigned long nr_pages, in __get_user_pages_locked() argument
1023 ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1032 BUG_ON(ret >= nr_pages); in __get_user_pages_locked()
[all …]
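Callers of the GUP family must cope with partial success: the return value is the number of pages actually pinned, which may be short of nr_pages. A sketch of the common pin-and-rollback idiom (the same shape as rds_pin_pages() further down this page):

    /* Sketch: pin nr_pages of user memory, releasing a short pin. */
    ret = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
    if (ret >= 0 && ret < nr_pages) {
            while (ret--)
                    put_page(pages[ret]);   /* undo the partial pin */
            ret = -EFAULT;
    }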
memcontrol.c
586 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess() local
590 if (nr_pages > soft_limit) in soft_limit_excess()
591 excess = nr_pages - soft_limit; in soft_limit_excess()
834 bool compound, int nr_pages) in mem_cgroup_charge_statistics() argument
841 __mod_memcg_state(memcg, MEMCG_RSS, nr_pages); in mem_cgroup_charge_statistics()
843 __mod_memcg_state(memcg, MEMCG_CACHE, nr_pages); in mem_cgroup_charge_statistics()
845 __mod_memcg_state(memcg, NR_SHMEM, nr_pages); in mem_cgroup_charge_statistics()
850 __mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages); in mem_cgroup_charge_statistics()
854 if (nr_pages > 0) in mem_cgroup_charge_statistics()
858 nr_pages = -nr_pages; /* for event */ in mem_cgroup_charge_statistics()
[all …]
page_isolation.c
40 arg.nr_pages = pageblock_nr_pages; in set_migratetype_isolate()
73 unsigned long nr_pages; in set_migratetype_isolate() local
78 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE, in set_migratetype_isolate()
81 __mod_zone_freepage_state(zone, -nr_pages, mt); in set_migratetype_isolate()
93 unsigned long flags, nr_pages; in unset_migratetype_isolate() local
133 nr_pages = move_freepages_block(zone, page, migratetype, NULL); in unset_migratetype_isolate()
134 __mod_zone_freepage_state(zone, nr_pages, migratetype); in unset_migratetype_isolate()
147 __first_valid_page(unsigned long pfn, unsigned long nr_pages) in __first_valid_page() argument
151 for (i = 0; i < nr_pages; i++) { in __first_valid_page()
process_vm_access.c
84 unsigned long nr_pages; in process_vm_rw_single_vec() local
93 nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; in process_vm_rw_single_vec()
98 while (!rc && nr_pages && iov_iter_count(iter)) { in process_vm_rw_single_vec()
99 int pages = min(nr_pages, max_pages_per_loop); in process_vm_rw_single_vec()
125 nr_pages -= pages; in process_vm_rw_single_vec()
162 unsigned long nr_pages = 0; in process_vm_rw_core() local
178 nr_pages = max(nr_pages, nr_pages_iov); in process_vm_rw_core()
182 if (nr_pages == 0) in process_vm_rw_core()
185 if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) { in process_vm_rw_core()
189 sizeof(struct pages *)*nr_pages), in process_vm_rw_core()
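The nr_pages computation in process_vm_rw_single_vec() counts how many pages the byte range [addr, addr + len) touches: last page index minus first, plus one. For example, with 4 KiB pages, addr = 4094 and len = 4 straddles one page boundary and gives 2. As a sketch:

    /* Sketch: pages spanned by [addr, addr + len). */
    static unsigned long pages_spanned(unsigned long addr, unsigned long len)
    {
            return (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
    }
    /* pages_spanned(4094, 4) == 2 when PAGE_SIZE == 4096 */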
/Linux-v5.4/drivers/media/v4l2-core/
videobuf-dma-sg.c
63 int nr_pages) in videobuf_vmalloc_to_sg() argument
69 sglist = vzalloc(array_size(nr_pages, sizeof(*sglist))); in videobuf_vmalloc_to_sg()
72 sg_init_table(sglist, nr_pages); in videobuf_vmalloc_to_sg()
73 for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) { in videobuf_vmalloc_to_sg()
93 int nr_pages, int offset, size_t size) in videobuf_pages_to_sg() argument
100 sglist = vmalloc(array_size(nr_pages, sizeof(*sglist))); in videobuf_pages_to_sg()
103 sg_init_table(sglist, nr_pages); in videobuf_pages_to_sg()
111 for (i = 1; i < nr_pages; i++) { in videobuf_pages_to_sg()
174 dma->nr_pages = last-first+1; in videobuf_dma_init_user_locked()
175 dma->pages = kmalloc_array(dma->nr_pages, sizeof(struct page *), in videobuf_dma_init_user_locked()
[all …]
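videobuf_vmalloc_to_sg() builds one scatterlist entry per page because vmalloc memory is only virtually contiguous. A condensed sketch of that loop (error handling dropped):

    /* Sketch: one sg entry per page of a vmalloc'ed buffer. */
    sglist = vzalloc(array_size(nr_pages, sizeof(*sglist)));
    sg_init_table(sglist, nr_pages);
    for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
            pg = vmalloc_to_page(virt);   /* resolve each virtual page */
            sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
    }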
/Linux-v5.4/include/linux/
page_counter.h
50 void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
51 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
53 unsigned long nr_pages,
55 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
56 void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
57 void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
58 int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
60 unsigned long *nr_pages);
hugetlb_cgroup.h
54 extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
56 extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
59 extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
61 extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
85 hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, in hugetlb_cgroup_charge_cgroup() argument
92 hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, in hugetlb_cgroup_commit_charge() argument
99 hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page) in hugetlb_cgroup_uncharge_page() argument
104 hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, in hugetlb_cgroup_uncharge_cgroup() argument
memory_hotplug.h
94 extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
129 unsigned long nr_pages, struct vmem_altmap *altmap);
132 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
137 unsigned long nr_pages, struct mhp_restrictions *restrictions) in add_pages() argument
139 return __add_pages(nid, start_pfn, nr_pages, restrictions); in add_pages()
142 int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
314 extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
316 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
322 unsigned long nr_pages) in is_mem_section_removable() argument
329 static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) in offline_pages() argument
[all …]
mm_inline.h
28 int nr_pages) in __update_lru_size() argument
32 __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); in __update_lru_size()
34 NR_ZONE_LRU_BASE + lru, nr_pages); in __update_lru_size()
39 int nr_pages) in update_lru_size() argument
41 __update_lru_size(lruvec, lru, zid, nr_pages); in update_lru_size()
43 mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); in update_lru_size()
/Linux-v5.4/drivers/xen/
balloon.c
413 static enum bp_state increase_reservation(unsigned long nr_pages) in increase_reservation() argument
419 if (nr_pages > ARRAY_SIZE(frame_list)) in increase_reservation()
420 nr_pages = ARRAY_SIZE(frame_list); in increase_reservation()
423 for (i = 0; i < nr_pages; i++) { in increase_reservation()
425 nr_pages = i; in increase_reservation()
433 rc = xenmem_reservation_increase(nr_pages, frame_list); in increase_reservation()
452 static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) in decrease_reservation() argument
460 if (nr_pages > ARRAY_SIZE(frame_list)) in decrease_reservation()
461 nr_pages = ARRAY_SIZE(frame_list); in decrease_reservation()
463 for (i = 0; i < nr_pages; i++) { in decrease_reservation()
[all …]
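Both reservation paths batch work through the fixed frame_list[] array: clamp nr_pages to its capacity, shrink the batch if ballooned pages run out, then issue one hypercall for the lot. A hedged sketch of that shape (balloon_next_page() and page_to_xen_pfn() as in the v5.4 driver):

    /* Sketch: one bounded batch per pass; the balloon worker retries. */
    if (nr_pages > ARRAY_SIZE(frame_list))
            nr_pages = ARRAY_SIZE(frame_list);
    for (i = 0; i < nr_pages; i++) {
            if (!page) {            /* fewer ballooned pages than asked */
                    nr_pages = i;
                    break;
            }
            frame_list[i] = page_to_xen_pfn(page);
            page = balloon_next_page(page);
    }
    rc = xenmem_reservation_increase(nr_pages, frame_list);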
/Linux-v5.4/drivers/misc/mic/scif/
scif_rma.c
81 scif_create_pinned_pages(int nr_pages, int prot) in scif_create_pinned_pages() argument
90 pin->pages = scif_zalloc(nr_pages * sizeof(*pin->pages)); in scif_create_pinned_pages()
116 for (j = 0; j < pin->nr_pages; j++) { in scif_destroy_pinned_pages()
125 pin->nr_pages * sizeof(*pin->pages)); in scif_destroy_pinned_pages()
139 struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages, in scif_create_window() argument
149 window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr)); in scif_create_window()
153 window->num_pages = scif_zalloc(nr_pages * sizeof(*window->num_pages)); in scif_create_window()
171 nr_pages * sizeof(*window->dma_addr)); in scif_create_window()
188 int nr_pages = window->nr_pages; in scif_destroy_incomplete_window() local
213 scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr)); in scif_destroy_incomplete_window()
[all …]
scif_mmap.c
34 req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT; in scif_recv_munmap()
48 scif_put_window(window, window->nr_pages); in scif_recv_munmap()
130 scif_put_window(window, window->nr_pages); in __scif_cleanup_rma_for_zombies()
218 int nr_pages, err, i; in scif_get_pages() local
233 nr_pages = len >> PAGE_SHIFT; in scif_get_pages()
259 (*pages)->phys_addr = scif_zalloc(nr_pages * sizeof(dma_addr_t)); in scif_get_pages()
267 ((*pages)->va = scif_zalloc(nr_pages * sizeof(void *))); in scif_get_pages()
275 (*pages)->nr_pages = nr_pages; in scif_get_pages()
278 for (i = 0; i < nr_pages; i++) { in scif_get_pages()
291 scif_get_window(window, nr_pages); in scif_get_pages()
[all …]
/Linux-v5.4/fs/iomap/
swapfile.c
19 unsigned long nr_pages; /* number of pages collected */ member
32 unsigned long nr_pages; in iomap_swapfile_add_extent() local
49 nr_pages = next_ppage - first_ppage; in iomap_swapfile_add_extent()
65 error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage); in iomap_swapfile_add_extent()
69 isi->nr_pages += nr_pages; in iomap_swapfile_add_extent()
173 sis->max = isi.nr_pages; in iomap_swapfile_activate()
174 sis->pages = isi.nr_pages - 1; in iomap_swapfile_activate()
175 sis->highest_bit = isi.nr_pages - 1; in iomap_swapfile_activate()
/Linux-v5.4/net/rds/
info.c
163 unsigned long nr_pages = 0; in rds_info_getsockopt() local
188 nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK)) in rds_info_getsockopt()
191 pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); in rds_info_getsockopt()
196 ret = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages); in rds_info_getsockopt()
197 if (ret != nr_pages) { in rds_info_getsockopt()
199 nr_pages = ret; in rds_info_getsockopt()
201 nr_pages = 0; in rds_info_getsockopt()
206 rdsdebug("len %d nr_pages %lu\n", len, nr_pages); in rds_info_getsockopt()
238 for (i = 0; pages && i < nr_pages; i++) in rds_info_getsockopt()
rdma.c
156 static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages, in rds_pin_pages() argument
161 ret = get_user_pages_fast(user_addr, nr_pages, write ? FOLL_WRITE : 0, in rds_pin_pages()
164 if (ret >= 0 && ret < nr_pages) { in rds_pin_pages()
178 unsigned int nr_pages; in __rds_rdma_map() local
198 nr_pages = rds_pages_in_vec(&args->vec); in __rds_rdma_map()
199 if (nr_pages == 0) { in __rds_rdma_map()
207 if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) { in __rds_rdma_map()
213 args->vec.addr, args->vec.bytes, nr_pages); in __rds_rdma_map()
216 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); in __rds_rdma_map()
250 ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1); in __rds_rdma_map()
[all …]
/Linux-v5.4/kernel/events/
ring_buffer.c
174 if (rb->nr_pages) in __perf_output_begin()
234 handle->page = (offset >> page_shift) & (rb->nr_pages - 1); in __perf_output_begin()
328 if (!rb->nr_pages) in ring_buffer_init()
630 pgoff_t pgoff, int nr_pages, long watermark, int flags) in rb_alloc_aux() argument
643 max_order = ilog2(nr_pages); in rb_alloc_aux()
656 rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL, in rb_alloc_aux()
662 for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) { in rb_alloc_aux()
666 order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages)); in rb_alloc_aux()
690 rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages, in rb_alloc_aux()
709 rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1); in rb_alloc_aux()
[all …]
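perf keeps rb->nr_pages a power of two precisely so the data-page index can wrap with a mask instead of a division, as in the __perf_output_begin() hit above. A sketch of the invariant (rb_page_index is an illustrative helper, not from the tree):

    /* Sketch: wrap a byte offset into a power-of-two page ring.
     * For power-of-two nr_pages, (x & (nr_pages - 1)) == x % nr_pages;
     * e.g. nr_pages = 8, offset 13 pages in: 13 & 7 == 5. */
    static unsigned long rb_page_index(unsigned long offset,
                                       unsigned long nr_pages)
    {
            return (offset >> PAGE_SHIFT) & (nr_pages - 1);
    }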
/Linux-v5.4/arch/powerpc/platforms/powernv/
memtrace.c
71 static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages) in memtrace_offline_pages() argument
74 const unsigned long size = PFN_PHYS(nr_pages); in memtrace_offline_pages()
82 if (offline_pages(start_pfn, nr_pages)) { in memtrace_offline_pages()
97 u64 start_pfn, end_pfn, nr_pages, pfn; in memtrace_alloc_node() local
106 nr_pages = size >> PAGE_SHIFT; in memtrace_alloc_node()
109 end_pfn = round_down(end_pfn - nr_pages, nr_pages); in memtrace_alloc_node()
112 for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) { in memtrace_alloc_node()
113 if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) { in memtrace_alloc_node()
120 end_pfn = base_pfn + nr_pages; in memtrace_alloc_node()
/Linux-v5.4/tools/testing/selftests/vm/
userfaultfd.c
62 static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; variable
132 if (madvise(rel_area, nr_pages * page_size, MADV_DONTNEED)) { in anon_release_pages()
142 if (posix_memalign(alloc_area, page_size, nr_pages * page_size)) { in anon_allocate_area()
159 nr_pages * page_size, in hugetlb_release_pages()
160 nr_pages * page_size)) { in hugetlb_release_pages()
173 *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, in hugetlb_allocate_area()
177 nr_pages * page_size); in hugetlb_allocate_area()
184 area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, in hugetlb_allocate_area()
187 nr_pages * page_size); in hugetlb_allocate_area()
189 if (munmap(*alloc_area, nr_pages * page_size) < 0) in hugetlb_allocate_area()
[all …]
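The selftest sizes every test area as nr_pages * page_size, with nr_pages derived from the requested size and nr_cpus. A sketch of the anonymous variant visible above (err() from <err.h> is used here for brevity; the test itself prints and returns):

    /* Sketch: allocate, then later zap, the anonymous test area. */
    if (posix_memalign(&alloc_area, page_size, nr_pages * page_size))
            err(1, "posix_memalign");
    /* ... run the uffd test threads ... */
    if (madvise(alloc_area, nr_pages * page_size, MADV_DONTNEED))
            err(1, "madvise(MADV_DONTNEED)");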
/Linux-v5.4/drivers/staging/android/ion/
ion_cma_heap.c
35 unsigned long nr_pages = size >> PAGE_SHIFT; in ion_cma_allocate() local
42 pages = cma_alloc(cma_heap->cma, nr_pages, align, false); in ion_cma_allocate()
47 unsigned long nr_clear_pages = nr_pages; in ion_cma_allocate()
79 cma_release(cma_heap->cma, pages, nr_pages); in ion_cma_allocate()
87 unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; in ion_cma_free() local
90 cma_release(cma_heap->cma, pages, nr_pages); in ion_cma_free()
