/Linux-v5.10/drivers/media/pci/ivtv/

D | ivtv-udma.c |
     21  dma_page->page_count = dma_page->last - dma_page->first + 1;  in ivtv_udma_get_page_info()
     22  if (dma_page->page_count == 1) dma_page->tail -= dma_page->offset;  in ivtv_udma_get_page_info()
     36  for (i = 0; i < dma_page->page_count; i++) {  in ivtv_udma_fill_sg_list()
     37  unsigned int len = (i == dma_page->page_count - 1) ?  in ivtv_udma_fill_sg_list()
    100  if (dma->SG_length || dma->page_count) {  in ivtv_udma_setup()
    102  dma->SG_length, dma->page_count);  in ivtv_udma_setup()
    108  if (user_dma.page_count <= 0) {  in ivtv_udma_setup()
    110  user_dma.page_count, size_in_bytes, user_dma.offset);  in ivtv_udma_setup()
    115  err = pin_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,  in ivtv_udma_setup()
    118  if (user_dma.page_count != err) {  in ivtv_udma_setup()
    [all …]
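Hits 21 and 115 together show the shape of user-buffer DMA setup: the driver derives a pin count from the first and last page indices spanned by the user address range, then passes that count to pin_user_pages_unlocked(). A minimal userspace sketch of the arithmetic, assuming a 4 KiB page and a hypothetical helper name range_page_count():

    /* Pages spanned by a byte range: index of the last byte's page,
     * minus the index of the first byte's page, plus one. */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    static unsigned long range_page_count(unsigned long uaddr, unsigned long size)
    {
        unsigned long first = uaddr >> PAGE_SHIFT;              /* first page index */
        unsigned long last  = (uaddr + size - 1) >> PAGE_SHIFT; /* last page index  */

        return last - first + 1;
    }

    int main(void)
    {
        /* A 100-byte buffer straddling a page boundary needs 2 pages. */
        printf("%lu\n", range_page_count(PAGE_SIZE - 50, 100));
        return 0;
    }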

D | ivtv-yuv.c |
     54  if (dma->SG_length || dma->page_count) {  in ivtv_yuv_prep_user_dma()
     57  dma->SG_length, dma->page_count);  in ivtv_yuv_prep_user_dma()
     66  y_dma.page_count, &dma->map[0], FOLL_FORCE);  in ivtv_yuv_prep_user_dma()
     68  if (y_pages == y_dma.page_count) {  in ivtv_yuv_prep_user_dma()
     70  uv_dma.page_count, &dma->map[y_pages],  in ivtv_yuv_prep_user_dma()
     74  if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {  in ivtv_yuv_prep_user_dma()
     77  if (y_pages == y_dma.page_count) {  in ivtv_yuv_prep_user_dma()
     80  uv_pages, uv_dma.page_count);  in ivtv_yuv_prep_user_dma()
     91  y_pages, y_dma.page_count);  in ivtv_yuv_prep_user_dma()
    107  dma->page_count = y_pages + uv_pages;  in ivtv_yuv_prep_user_dma()
    [all …]
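Hits 66-91 trace the two-plane pin in ivtv_yuv_prep_user_dma(): the Y pages are pinned, then the UV pages, and if either pin comes up short everything already pinned is released before failing. A userspace sketch of that all-or-roll-back shape, with acquire()/release() as hypothetical stand-ins for pin_user_pages()/unpin_user_pages():

    #include <stdlib.h>

    static int acquire(void **slot) { *slot = malloc(64); return *slot != NULL; }
    static void release(void *p)    { free(p); }

    /* Acquire exactly `want` resources, or none at all. */
    static int acquire_all(void **slots, int want)
    {
        int got = 0;

        while (got < want && acquire(&slots[got]))
            got++;

        if (got != want) {          /* partial success: roll back */
            while (got--)
                release(slots[got]);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        void *slots[4];
        int i;

        if (acquire_all(slots, 4) == 0)
            for (i = 0; i < 4; i++)
                release(slots[i]);
        return 0;
    }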

/Linux-v5.10/drivers/gpu/drm/nouveau/

D | nouveau_bo85b5.c |
     47  u32 page_count = new_reg->num_pages;  in nva3_bo_move_copy() local
     50  page_count = new_reg->num_pages;  in nva3_bo_move_copy()
     51  while (page_count) {  in nva3_bo_move_copy()
     52  int line_count = (page_count > 8191) ? 8191 : page_count;  in nva3_bo_move_copy()
     68  page_count -= line_count;  in nva3_bo_move_copy()

D | nouveau_bo90b5.c |
     40  u32 page_count = new_reg->num_pages;  in nvc0_bo_move_copy() local
     43  page_count = new_reg->num_pages;  in nvc0_bo_move_copy()
     44  while (page_count) {  in nvc0_bo_move_copy()
     45  int line_count = (page_count > 8191) ? 8191 : page_count;  in nvc0_bo_move_copy()
     61  page_count -= line_count;  in nvc0_bo_move_copy()

D | nouveau_bo9039.c |
     45  u32 page_count = new_reg->num_pages;  in nvc0_bo_move_m2mf() local
     48  page_count = new_reg->num_pages;  in nvc0_bo_move_m2mf()
     49  while (page_count) {  in nvc0_bo_move_m2mf()
     50  int line_count = (page_count > 2047) ? 2047 : page_count;  in nvc0_bo_move_m2mf()
     78  page_count -= line_count;  in nvc0_bo_move_m2mf()

D | nouveau_bo0039.c |
     55  u32 page_count = new_reg->num_pages;  in nv04_bo_move_m2mf() local
     65  page_count = new_reg->num_pages;  in nv04_bo_move_m2mf()
     66  while (page_count) {  in nv04_bo_move_m2mf()
     67  int line_count = (page_count > 2047) ? 2047 : page_count;  in nv04_bo_move_m2mf()
     88  page_count -= line_count;  in nv04_bo_move_m2mf()
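All four nouveau movers drain page_count with the same loop: each submitted copy command covers at most the engine's per-command line limit (8191 for the copy engines above, 2047 for the M2MF paths). A sketch of that clamp-and-drain pattern, with submit_copy() a hypothetical stand-in for the per-chip command emission:

    #include <stdio.h>

    #define MAX_LINES 8191   /* 2047 on the M2MF paths */

    static void submit_copy(unsigned int lines)
    {
        printf("copy %u pages\n", lines);
    }

    static void move_buffer(unsigned int page_count)
    {
        while (page_count) {
            unsigned int line_count =
                page_count > MAX_LINES ? MAX_LINES : page_count;

            submit_copy(line_count);        /* one bounded command */
            page_count -= line_count;       /* loop until drained  */
        }
    }

    int main(void)
    {
        move_buffer(20000);   /* emits 8191 + 8191 + 3618 */
        return 0;
    }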

/Linux-v5.10/drivers/firewire/

D | core-iso.c |
     29  int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)  in fw_iso_buffer_alloc() argument
     33  buffer->page_count = 0;  in fw_iso_buffer_alloc()
     35  buffer->pages = kmalloc_array(page_count, sizeof(buffer->pages[0]),  in fw_iso_buffer_alloc()
     40  for (i = 0; i < page_count; i++) {  in fw_iso_buffer_alloc()
     45  buffer->page_count = i;  in fw_iso_buffer_alloc()
     46  if (i < page_count) {  in fw_iso_buffer_alloc()
     62  for (i = 0; i < buffer->page_count; i++) {  in fw_iso_buffer_map_dma()
     71  if (i < buffer->page_count)  in fw_iso_buffer_map_dma()
     78  int page_count, enum dma_data_direction direction)  in fw_iso_buffer_init() argument
     82  ret = fw_iso_buffer_alloc(buffer, page_count);  in fw_iso_buffer_init()
    [all …]
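Hits 40-46 show the bookkeeping fw_iso_buffer_alloc() uses for partial failure: page_count records how many pages were actually allocated, not how many were requested, so cleanup only walks the populated prefix. A userspace sketch of the same idea, with malloc() standing in for the kernel page allocator:

    #include <stdlib.h>

    struct buf {
        void **pages;
        int    page_count;   /* pages actually allocated */
    };

    static int buf_alloc(struct buf *b, int page_count)
    {
        int i;

        b->page_count = 0;
        b->pages = calloc(page_count, sizeof(b->pages[0]));
        if (!b->pages)
            return -1;

        for (i = 0; i < page_count; i++) {
            b->pages[i] = malloc(4096);
            if (!b->pages[i])
                break;
        }
        b->page_count = i;          /* may be short of the request */

        if (i < page_count) {       /* roll back the partial allocation */
            while (i--)
                free(b->pages[i]);
            free(b->pages);
            b->pages = NULL;
            b->page_count = 0;
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct buf b;
        int i;

        if (buf_alloc(&b, 8) == 0) {
            for (i = 0; i < b.page_count; i++)
                free(b.pages[i]);
            free(b.pages);
        }
        return 0;
    }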

/Linux-v5.10/drivers/char/agp/

D | generic.c |
    181  if (curr->page_count != 0) {  in agp_free_memory()
    186  for (i = 0; i < curr->page_count; i++) {  in agp_free_memory()
    191  for (i = 0; i < curr->page_count; i++) {  in agp_free_memory()
    219  size_t page_count, u32 type)  in agp_allocate_memory() argument
    230  if ((cur_memory + page_count > bridge->max_memory_agp) ||  in agp_allocate_memory()
    231  (cur_memory + page_count < page_count))  in agp_allocate_memory()
    235  new = agp_generic_alloc_user(page_count, type);  in agp_allocate_memory()
    242  new = bridge->driver->alloc_by_type(page_count, type);  in agp_allocate_memory()
    248  scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;  in agp_allocate_memory()
    256  if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {  in agp_allocate_memory()
    [all …]
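The pair of checks at lines 230-231 is a classic unsigned-overflow guard: for unsigned values, cur_memory + page_count < page_count holds exactly when the sum wrapped around, so an oversized request cannot sneak past the max_memory_agp limit. A small standalone demonstration:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    static bool request_fits(size_t cur_memory, size_t page_count, size_t max_pages)
    {
        if (cur_memory + page_count < page_count)   /* sum wrapped around */
            return false;
        return cur_memory + page_count <= max_pages;
    }

    int main(void)
    {
        assert(request_fits(10, 20, 100));            /* 30 pages fit */
        assert(!request_fits((size_t)-1, 2, 100));    /* wraps, rejected */
        return 0;
    }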

D | i460-agp.c |
    311  if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {  in i460_insert_memory_small_io_page()
    317  while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {  in i460_insert_memory_small_io_page()
    327  for (i = 0, j = io_pg_start; i < mem->page_count; i++) {  in i460_insert_memory_small_io_page()
    346  for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)  in i460_remove_memory_small_io_page()
    415  end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];  in i460_insert_memory_large_io_page()
    417  end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;  in i460_insert_memory_large_io_page()
    473  end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];  in i460_remove_memory_large_io_page()
    475  end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;  in i460_remove_memory_large_io_page()

D | nvidia-agp.c |
    210  if (mem->page_count == 0)  in nvidia_insert_memory()
    213  if ((pg_start + mem->page_count) >  in nvidia_insert_memory()
    217  for (j = pg_start; j < (pg_start + mem->page_count); j++) {  in nvidia_insert_memory()
    226  for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {  in nvidia_insert_memory()
    250  if (mem->page_count == 0)  in nvidia_remove_memory()
    253  for (i = pg_start; i < (mem->page_count + pg_start); i++)  in nvidia_remove_memory()

D | intel-gtt.c |
    127  DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);  in intel_gtt_unmap_memory()
    217  if ((pg_start + mem->page_count)  in i810_insert_dcache_entries()
    224  for (i = pg_start; i < (pg_start + mem->page_count); i++) {  in i810_insert_dcache_entries()
    269  new->page_count = pg_count;  in alloc_agpphysmem_i8xx()
    280  if (curr->page_count == 4)  in intel_i810_free_by_type()
    914  if (mem->page_count == 0)  in intel_fake_agp_insert_entries()
    917  if (pg_start + mem->page_count > intel_private.gtt_total_entries)  in intel_fake_agp_insert_entries()
    932  ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);  in intel_fake_agp_insert_entries()
    940  intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,  in intel_fake_agp_insert_entries()
    967  if (mem->page_count == 0)  in intel_fake_agp_remove_entries()
    [all …]

D | ati-agp.c |
    282  if (mem->page_count == 0)  in ati_insert_memory()
    285  if ((pg_start + mem->page_count) > num_entries)  in ati_insert_memory()
    289  while (j < (pg_start + mem->page_count)) {  in ati_insert_memory()
    303  for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {  in ati_insert_memory()
    328  if (mem->page_count == 0)  in ati_remove_memory()
    331  for (i = pg_start; i < (mem->page_count + pg_start); i++) {  in ati_remove_memory()

D | efficeon-agp.c |
    238  int i, count = mem->page_count, num_entries;  in efficeon_insert_memory()
    246  if ((pg_start + mem->page_count) > num_entries)  in efficeon_insert_memory()
    287  int i, count = mem->page_count, num_entries;  in efficeon_remove_memory()
    293  if ((pg_start + mem->page_count) > num_entries)  in efficeon_remove_memory()

D | uninorth-agp.c |
    165  if (mem->page_count == 0)  in uninorth_insert_memory()
    171  if ((pg_start + mem->page_count) > num_entries)  in uninorth_insert_memory()
    175  for (i = 0; i < mem->page_count; ++i) {  in uninorth_insert_memory()
    184  for (i = 0; i < mem->page_count; i++) {  in uninorth_insert_memory()
    214  if (mem->page_count == 0)  in uninorth_remove_memory()
    218  for (i = 0; i < mem->page_count; ++i) {  in uninorth_remove_memory()
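The nvidia, ati, efficeon, and uninorth insert paths all open with the same preamble: an empty request succeeds trivially, and pg_start + page_count must not run past the aperture's entry table. A sketch of that shared shape (insert_preamble() is an illustrative name, not a kernel function):

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    static int insert_preamble(size_t pg_start, size_t page_count, size_t num_entries)
    {
        if (page_count == 0)
            return 0;                    /* nothing to map */
        if (pg_start + page_count > num_entries)
            return -EINVAL;              /* would run past the GATT */
        return 1;                        /* caller proceeds to map */
    }

    int main(void)
    {
        printf("%d\n", insert_preamble(100, 50, 128));   /* rejected: 150 > 128 */
        return 0;
    }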

D | agp.h |
    199  struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
    203  struct agp_memory *memory, size_t page_count);
    220  struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);

/Linux-v5.10/drivers/target/

D | target_core_rd.c |
     68  u32 i, j, page_count = 0, sg_per_table;  in rd_release_sgl_table() local
     78  page_count++;  in rd_release_sgl_table()
     85  return page_count;  in rd_release_sgl_table()
     90  u32 page_count;  in rd_release_device_space() local
     95  page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,  in rd_release_device_space()
    100  rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,  in rd_release_device_space()
    101  rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);  in rd_release_device_space()
    216  u32 page_count;  in rd_release_prot_space() local
    221  page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,  in rd_release_prot_space()
    226  rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,  in rd_release_prot_space()
    [all …]

/Linux-v5.10/drivers/staging/kpc2000/kpc_dma/

D | fileops.c |
     66  acd->page_count = count_pages(iov_base, iov_len);  in kpc_dma_transfer()
     69  acd->user_pages = kcalloc(acd->page_count, sizeof(struct page *),  in kpc_dma_transfer()
     79  rv = pin_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE, acd->user_pages, NULL);  in kpc_dma_transfer()
     81  if (rv != acd->page_count) {  in kpc_dma_transfer()
     89  nr_pages = acd->page_count;  in kpc_dma_transfer()
     92  …rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE …  in kpc_dma_transfer()
    219  for (i = 0 ; i < acd->page_count ; i++) {  in transfer_complete_cb()
    224  unpin_user_pages(acd->user_pages, acd->page_count);  in transfer_complete_cb()
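Hit 92 passes iov_base & (PAGE_SIZE - 1) to sg_alloc_table_from_pages(): the low bits of the user address are the sub-page offset into the first pinned page, while the pin itself (hit 79) works in whole pages. A small sketch of that address split, assuming a 4 KiB page:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long iov_base = 0x7f2a12345678UL;            /* example user address */
        unsigned long offset   = iov_base & (PAGE_SIZE - 1);  /* sub-page offset      */
        unsigned long base     = iov_base & ~(PAGE_SIZE - 1); /* page-aligned start   */

        printf("page base 0x%lx, offset 0x%lx\n", base, offset);
        return 0;
    }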

/Linux-v5.10/fs/squashfs/

D | block.c |
     86  const int page_count = DIV_ROUND_UP(total_len + offset, PAGE_SIZE);  in squashfs_bio_read() local
     90  if (page_count <= BIO_MAX_PAGES)  in squashfs_bio_read()
     91  bio = bio_alloc(GFP_NOIO, page_count);  in squashfs_bio_read()
     93  bio = bio_kmalloc(GFP_NOIO, page_count);  in squashfs_bio_read()
    102  for (i = 0; i < page_count; ++i) {  in squashfs_bio_read()
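Line 86 computes the bio's page budget with DIV_ROUND_UP(total_len + offset, PAGE_SIZE): the read's offset into its first page is added before rounding up, because a read that starts mid-page can spill into one extra page. A standalone check of that arithmetic:

    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int total_len = PAGE_SIZE;   /* one page of data...               */
        int offset    = 100;         /* ...starting 100 bytes into a page */

        /* 4196 bytes of span covers 2 pages, not 1. */
        printf("%d\n", DIV_ROUND_UP(total_len + offset, PAGE_SIZE));
        return 0;
    }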

/Linux-v5.10/fs/btrfs/

D | scrub.c |
     97  int page_count;  member
    104  int page_count;  member
    549  for (i = 0; i < sbio->page_count; i++) {  in scrub_free_ctx()
    600  sbio->page_count = 0;  in scrub_setup_ctx()
    745  WARN_ON(sblock->page_count < 1);  in scrub_print_warning()
    841  BUG_ON(sblock_to_check->page_count < 1);  in scrub_handle_errored_block()
   1028  if (!sblocks_for_recheck[mirror_index].page_count)  in scrub_handle_errored_block()
   1039  if (!sblocks_for_recheck[1].page_count)  in scrub_handle_errored_block()
   1093  for (page_num = 0; page_num < sblock_bad->page_count;  in scrub_handle_errored_block()
   1115  sblocks_for_recheck[mirror_index].page_count > 0;  in scrub_handle_errored_block()
    [all …]

/Linux-v5.10/fs/orangefs/

D | orangefs-bufmap.c |
    152  int page_count;  member
    171  unpin_user_pages(bufmap->page_array, bufmap->page_count);  in orangefs_bufmap_unmap()
    240  bufmap->page_count = bufmap->total_size / PAGE_SIZE;  in orangefs_bufmap_alloc()
    244  kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL);  in orangefs_bufmap_alloc()
    269  bufmap->page_count, FOLL_WRITE, bufmap->page_array);  in orangefs_bufmap_map()
    274  if (ret != bufmap->page_count) {  in orangefs_bufmap_map()
    276  bufmap->page_count, ret);  in orangefs_bufmap_map()
    291  for (i = 0; i < bufmap->page_count; i++)  in orangefs_bufmap_map()

/Linux-v5.10/tools/testing/radix-tree/

D | regression2.c |
     61  unsigned long page_count = 0;  variable
     71  p->index = page_count++;  in page_alloc()

/Linux-v5.10/drivers/virt/vboxguest/

D | vboxguest_utils.c |
    199  u32 page_count;  in hgcm_call_add_pagelist_size() local
    201  page_count = hgcm_call_buf_size_in_pages(buf, len);  in hgcm_call_add_pagelist_size()
    202  *extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);  in hgcm_call_add_pagelist_size()
    337  u32 i, page_count;  in hgcm_call_init_linaddr() local
    348  page_count = hgcm_call_buf_size_in_pages(buf, len);  in hgcm_call_init_linaddr()
    356  dst_pg_lst->page_count = page_count;  in hgcm_call_init_linaddr()
    358  for (i = 0; i < page_count; i++) {  in hgcm_call_init_linaddr()
    368  *off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);  in hgcm_call_init_linaddr()
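Hits 202 and 368 size a variable-length page list with offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]): the header plus page_count trailing entries, using a non-constant array index inside offsetof (a GCC/Clang extension the kernel relies on). A sketch with a simplified stand-in struct:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct pagelist {
        uint32_t flags;
        uint16_t offset_first_page;
        uint16_t page_count;
        uint64_t pages[];        /* flexible array member */
    };

    int main(void)
    {
        size_t page_count = 3;

        /* 8-byte header + 3 * 8-byte entries = 32 bytes to reserve. */
        printf("%zu\n", offsetof(struct pagelist, pages[page_count]));
        return 0;
    }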

/Linux-v5.10/drivers/gpu/drm/ttm/

D | ttm_agp_backend.c |
     67  mem->page_count = 0;  in ttm_agp_bind()
     74  mem->pages[mem->page_count++] = page;  in ttm_agp_bind()

/Linux-v5.10/drivers/gpu/drm/i915/gem/

D | i915_gem_shmem.c |
     32  const unsigned long page_count = obj->base.size / PAGE_SIZE;  in shmem_get_pages() local
     65  if (sg_alloc_table(st, page_count, GFP_KERNEL)) {  in shmem_get_pages()
     84  for (i = 0; i < page_count; i++) {  in shmem_get_pages()
    102  i915_gem_shrink(i915, 2 * page_count, NULL, *s++);  in shmem_get_pages()
    177  page_count);  in shmem_get_pages()

/Linux-v5.10/drivers/net/wireless/mediatek/mt76/mt7603/

D | init.c |
     43  int page_count;  in mt7603_dma_sched_init() local
     54  page_count = mt76_get_field(dev, MT_PSE_FC_P0,  in mt7603_dma_sched_init()
     72  mt76_wr(dev, MT_SCH_1, page_count | (2 << 28));  in mt7603_dma_sched_init()
     91  mt76_wr(dev, MT_RSV_MAX_THRESH, page_count - reserved_count);  in mt7603_dma_sched_init()
     95  page_count - beacon_pages - mcu_pages);  in mt7603_dma_sched_init()
    101  mt76_wr(dev, MT_GROUP_THRESH(0), page_count);  in mt7603_dma_sched_init()