Home
last modified time | relevance | path

Searched refs: page_count (Results 1 – 25 of 124) sorted by relevance

1 2 3 4 5

/Linux-v5.4/drivers/media/pci/ivtv/
D ivtv-udma.c 21 dma_page->page_count = dma_page->last - dma_page->first + 1; in ivtv_udma_get_page_info()
22 if (dma_page->page_count == 1) dma_page->tail -= dma_page->offset; in ivtv_udma_get_page_info()
36 for (i = 0; i < dma_page->page_count; i++) { in ivtv_udma_fill_sg_list()
37 unsigned int len = (i == dma_page->page_count - 1) ? in ivtv_udma_fill_sg_list()
100 if (dma->SG_length || dma->page_count) { in ivtv_udma_setup()
102 dma->SG_length, dma->page_count); in ivtv_udma_setup()
108 if (user_dma.page_count <= 0) { in ivtv_udma_setup()
110 user_dma.page_count, size_in_bytes, user_dma.offset); in ivtv_udma_setup()
115 err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, in ivtv_udma_setup()
118 if (user_dma.page_count != err) { in ivtv_udma_setup()
[all …]
D ivtv-yuv.c 55 if (dma->SG_length || dma->page_count) { in ivtv_yuv_prep_user_dma()
58 dma->SG_length, dma->page_count); in ivtv_yuv_prep_user_dma()
67 y_dma.page_count, &dma->map[0], FOLL_FORCE); in ivtv_yuv_prep_user_dma()
69 if (y_pages == y_dma.page_count) { in ivtv_yuv_prep_user_dma()
71 uv_dma.page_count, &dma->map[y_pages], in ivtv_yuv_prep_user_dma()
75 if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) { in ivtv_yuv_prep_user_dma()
78 if (y_pages == y_dma.page_count) { in ivtv_yuv_prep_user_dma()
81 uv_pages, uv_dma.page_count); in ivtv_yuv_prep_user_dma()
93 y_pages, y_dma.page_count); in ivtv_yuv_prep_user_dma()
110 dma->page_count = y_pages + uv_pages; in ivtv_yuv_prep_user_dma()
[all …]
/Linux-v5.4/drivers/firewire/
D core-iso.c 29 int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count) in fw_iso_buffer_alloc() argument
33 buffer->page_count = 0; in fw_iso_buffer_alloc()
35 buffer->pages = kmalloc_array(page_count, sizeof(buffer->pages[0]), in fw_iso_buffer_alloc()
40 for (i = 0; i < page_count; i++) { in fw_iso_buffer_alloc()
45 buffer->page_count = i; in fw_iso_buffer_alloc()
46 if (i < page_count) { in fw_iso_buffer_alloc()
62 for (i = 0; i < buffer->page_count; i++) { in fw_iso_buffer_map_dma()
71 if (i < buffer->page_count) in fw_iso_buffer_map_dma()
78 int page_count, enum dma_data_direction direction) in fw_iso_buffer_init() argument
82 ret = fw_iso_buffer_alloc(buffer, page_count); in fw_iso_buffer_init()
[all …]
/Linux-v5.4/drivers/char/agp/
D generic.c 182 if (curr->page_count != 0) { in agp_free_memory()
187 for (i = 0; i < curr->page_count; i++) { in agp_free_memory()
192 for (i = 0; i < curr->page_count; i++) { in agp_free_memory()
219 size_t page_count, u32 type) in agp_allocate_memory() argument
230 if ((cur_memory + page_count > bridge->max_memory_agp) || in agp_allocate_memory()
231 (cur_memory + page_count < page_count)) in agp_allocate_memory()
235 new = agp_generic_alloc_user(page_count, type); in agp_allocate_memory()
242 new = bridge->driver->alloc_by_type(page_count, type); in agp_allocate_memory()
248 scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; in agp_allocate_memory()
256 if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) { in agp_allocate_memory()
[all …]
D i460-agp.c 311 if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) { in i460_insert_memory_small_io_page()
317 while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) { in i460_insert_memory_small_io_page()
327 for (i = 0, j = io_pg_start; i < mem->page_count; i++) { in i460_insert_memory_small_io_page()
346 for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++) in i460_remove_memory_small_io_page()
415 end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; in i460_insert_memory_large_io_page()
417 end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; in i460_insert_memory_large_io_page()
473 end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; in i460_remove_memory_large_io_page()
475 end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; in i460_remove_memory_large_io_page()
D nvidia-agp.c 210 if (mem->page_count == 0) in nvidia_insert_memory()
213 if ((pg_start + mem->page_count) > in nvidia_insert_memory()
217 for (j = pg_start; j < (pg_start + mem->page_count); j++) { in nvidia_insert_memory()
226 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in nvidia_insert_memory()
250 if (mem->page_count == 0) in nvidia_remove_memory()
253 for (i = pg_start; i < (mem->page_count + pg_start); i++) in nvidia_remove_memory()
D intel-gtt.c 127 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); in intel_gtt_unmap_memory()
217 if ((pg_start + mem->page_count) in i810_insert_dcache_entries()
224 for (i = pg_start; i < (pg_start + mem->page_count); i++) { in i810_insert_dcache_entries()
269 new->page_count = pg_count; in alloc_agpphysmem_i8xx()
280 if (curr->page_count == 4) in intel_i810_free_by_type()
911 if (mem->page_count == 0) in intel_fake_agp_insert_entries()
914 if (pg_start + mem->page_count > intel_private.gtt_total_entries) in intel_fake_agp_insert_entries()
929 ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st); in intel_fake_agp_insert_entries()
937 intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages, in intel_fake_agp_insert_entries()
964 if (mem->page_count == 0) in intel_fake_agp_remove_entries()
[all …]
D ati-agp.c 282 if (mem->page_count == 0) in ati_insert_memory()
285 if ((pg_start + mem->page_count) > num_entries) in ati_insert_memory()
289 while (j < (pg_start + mem->page_count)) { in ati_insert_memory()
303 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in ati_insert_memory()
328 if (mem->page_count == 0) in ati_remove_memory()
331 for (i = pg_start; i < (mem->page_count + pg_start); i++) { in ati_remove_memory()
D efficeon-agp.c 238 int i, count = mem->page_count, num_entries; in efficeon_insert_memory()
246 if ((pg_start + mem->page_count) > num_entries) in efficeon_insert_memory()
287 int i, count = mem->page_count, num_entries; in efficeon_remove_memory()
293 if ((pg_start + mem->page_count) > num_entries) in efficeon_remove_memory()
D uninorth-agp.c 165 if (mem->page_count == 0) in uninorth_insert_memory()
171 if ((pg_start + mem->page_count) > num_entries) in uninorth_insert_memory()
175 for (i = 0; i < mem->page_count; ++i) { in uninorth_insert_memory()
184 for (i = 0; i < mem->page_count; i++) { in uninorth_insert_memory()
214 if (mem->page_count == 0) in uninorth_remove_memory()
218 for (i = 0; i < mem->page_count; ++i) { in uninorth_remove_memory()
D agp.h 199 struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
203 struct agp_memory *memory, size_t page_count);
220 struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
/Linux-v5.4/drivers/target/
D target_core_rd.c 68 u32 i, j, page_count = 0, sg_per_table; in rd_release_sgl_table() local
78 page_count++; in rd_release_sgl_table()
85 return page_count; in rd_release_sgl_table()
90 u32 page_count; in rd_release_device_space() local
95 page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array, in rd_release_device_space()
100 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, in rd_release_device_space()
101 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); in rd_release_device_space()
216 u32 page_count; in rd_release_prot_space() local
221 page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array, in rd_release_prot_space()
226 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, in rd_release_prot_space()
[all …]
/Linux-v5.4/drivers/staging/kpc2000/kpc_dma/
D fileops.c 68 acd->page_count = count_pages(iov_base, iov_len); in kpc_dma_transfer()
71 acd->user_pages = kzalloc(sizeof(struct page *) * acd->page_count, GFP_KERNEL); in kpc_dma_transfer()
80 …rv = get_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE | FOLL_GET, acd->user_pages… in kpc_dma_transfer()
82 if (rv != acd->page_count) { in kpc_dma_transfer()
88 …rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE-… in kpc_dma_transfer()
193 for (i = 0 ; i < acd->page_count ; i++) { in kpc_dma_transfer()
214 for (i = 0 ; i < acd->page_count ; i++) { in transfer_complete_cb()
222 for (i = 0 ; i < acd->page_count ; i++) { in transfer_complete_cb()
/Linux-v5.4/fs/orangefs/
D orangefs-bufmap.c 152 int page_count; member
173 for (i = 0; i < bufmap->page_count; i++) in orangefs_bufmap_unmap()
243 bufmap->page_count = bufmap->total_size / PAGE_SIZE; in orangefs_bufmap_alloc()
247 kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL); in orangefs_bufmap_alloc()
272 bufmap->page_count, FOLL_WRITE, bufmap->page_array); in orangefs_bufmap_map()
277 if (ret != bufmap->page_count) { in orangefs_bufmap_map()
279 bufmap->page_count, ret); in orangefs_bufmap_map()
294 for (i = 0; i < bufmap->page_count; i++) in orangefs_bufmap_map()
/Linux-v5.4/fs/btrfs/
D scrub.c 96 int page_count; member
103 int page_count; member
549 for (i = 0; i < sbio->page_count; i++) { in scrub_free_ctx()
600 sbio->page_count = 0; in scrub_setup_ctx()
746 WARN_ON(sblock->page_count < 1); in scrub_print_warning()
842 BUG_ON(sblock_to_check->page_count < 1); in scrub_handle_errored_block()
1029 if (!sblocks_for_recheck[mirror_index].page_count) in scrub_handle_errored_block()
1040 if (!sblocks_for_recheck[1].page_count) in scrub_handle_errored_block()
1094 for (page_num = 0; page_num < sblock_bad->page_count; in scrub_handle_errored_block()
1116 sblocks_for_recheck[mirror_index].page_count > 0; in scrub_handle_errored_block()
[all …]
/Linux-v5.4/drivers/gpu/drm/nouveau/
D nouveau_bo.c 789 u32 page_count = new_reg->num_pages; in nvc0_bo_move_copy() local
792 page_count = new_reg->num_pages; in nvc0_bo_move_copy()
793 while (page_count) { in nvc0_bo_move_copy()
794 int line_count = (page_count > 8191) ? 8191 : page_count; in nvc0_bo_move_copy()
812 page_count -= line_count; in nvc0_bo_move_copy()
827 u32 page_count = new_reg->num_pages; in nvc0_bo_move_m2mf() local
830 page_count = new_reg->num_pages; in nvc0_bo_move_m2mf()
831 while (page_count) { in nvc0_bo_move_m2mf()
832 int line_count = (page_count > 2047) ? 2047 : page_count; in nvc0_bo_move_m2mf()
851 page_count -= line_count; in nvc0_bo_move_m2mf()
[all …]
/Linux-v5.4/tools/testing/radix-tree/
D regression2.c 61 unsigned long page_count = 0; variable
71 p->index = page_count++; in page_alloc()
/Linux-v5.4/drivers/virt/vboxguest/
D vboxguest_utils.c 197 u32 page_count; in hgcm_call_add_pagelist_size() local
199 page_count = hgcm_call_buf_size_in_pages(buf, len); in hgcm_call_add_pagelist_size()
200 *extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]); in hgcm_call_add_pagelist_size()
335 u32 i, page_count; in hgcm_call_init_linaddr() local
346 page_count = hgcm_call_buf_size_in_pages(buf, len); in hgcm_call_init_linaddr()
354 dst_pg_lst->page_count = page_count; in hgcm_call_init_linaddr()
356 for (i = 0; i < page_count; i++) { in hgcm_call_init_linaddr()
366 *off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]); in hgcm_call_init_linaddr()
/Linux-v5.4/drivers/gpu/drm/i915/gem/
D i915_gem_shmem.c 29 const unsigned long page_count = obj->base.size / PAGE_SIZE; in shmem_get_pages() local
55 if (page_count > totalram_pages()) in shmem_get_pages()
63 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { in shmem_get_pages()
82 for (i = 0; i < page_count; i++) { in shmem_get_pages()
100 i915_gem_shrink(i915, 2 * page_count, NULL, *s++); in shmem_get_pages()
175 page_count); in shmem_get_pages()
/Linux-v5.4/drivers/gpu/drm/ttm/
D ttm_agp_backend.c 64 mem->page_count = 0; in ttm_agp_bind()
71 mem->pages[mem->page_count++] = page; in ttm_agp_bind()
/Linux-v5.4/drivers/gpu/drm/udl/
D udl_dmabuf.c 72 int page_count; in udl_map_dma_buf() local
90 page_count = obj->base.size / PAGE_SIZE; in udl_map_dma_buf()
91 obj->sg = drm_prime_pages_to_sg(obj->pages, page_count); in udl_map_dma_buf()
/Linux-v5.4/drivers/net/wireless/mediatek/mt76/mt7603/
D init.c 41 int page_count; in mt7603_dma_sched_init() local
52 page_count = mt76_get_field(dev, MT_PSE_FC_P0, in mt7603_dma_sched_init()
70 mt76_wr(dev, MT_SCH_1, page_count | (2 << 28)); in mt7603_dma_sched_init()
89 mt76_wr(dev, MT_RSV_MAX_THRESH, page_count - reserved_count); in mt7603_dma_sched_init()
93 page_count - beacon_pages - mcu_pages); in mt7603_dma_sched_init()
99 mt76_wr(dev, MT_GROUP_THRESH(0), page_count); in mt7603_dma_sched_init()
/Linux-v5.4/fs/pstore/
D ram_core.c 406 unsigned int page_count; in persistent_ram_vmap() local
412 page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE); in persistent_ram_vmap()
419 pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); in persistent_ram_vmap()
422 __func__, page_count); in persistent_ram_vmap()
426 for (i = 0; i < page_count; i++) { in persistent_ram_vmap()
430 vaddr = vmap(pages, page_count, VM_MAP, prot); in persistent_ram_vmap()
/Linux-v5.4/fs/xfs/
D xfs_buf.c 269 int page_count) in _xfs_buf_get_pages() argument
273 bp->b_page_count = page_count; in _xfs_buf_get_pages()
274 if (page_count <= XB_PAGES) { in _xfs_buf_get_pages()
278 page_count, KM_NOFS); in _xfs_buf_get_pages()
282 memset(bp->b_pages, 0, sizeof(struct page *) * page_count); in _xfs_buf_get_pages()
345 unsigned short page_count, i; in xfs_buf_allocate_memory() local
392 page_count = end - start; in xfs_buf_allocate_memory()
393 error = _xfs_buf_get_pages(bp, page_count); in xfs_buf_allocate_memory()
914 unsigned long page_count; in xfs_buf_get_uncached() local
924 page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT; in xfs_buf_get_uncached()
[all …]
/Linux-v5.4/drivers/gpu/drm/
D drm_bufs.c 855 dma->page_count += byte_count >> PAGE_SHIFT; in drm_legacy_addbufs_agp()
890 int page_count; in drm_legacy_addbufs_pci() local
958 temp_pagelist = kmalloc_array(dma->page_count + (count << page_order), in drm_legacy_addbufs_pci()
969 dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); in drm_legacy_addbufs_pci()
971 dma->page_count + (count << page_order)); in drm_legacy_addbufs_pci()
976 page_count = 0; in drm_legacy_addbufs_pci()
995 dma->page_count + page_count, in drm_legacy_addbufs_pci()
997 temp_pagelist[dma->page_count + page_count++] in drm_legacy_addbufs_pci()
1056 if (dma->page_count) { in drm_legacy_addbufs_pci()
1063 dma->page_count += entry->seg_count << page_order; in drm_legacy_addbufs_pci()
[all …]

1 2 3 4 5