Searched refs:page_index (Results 1 – 25 of 40) sorted by relevance

/Linux-v5.4/drivers/android/
binder_trace.h:307 TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
308 TP_ARGS(alloc, page_index),
311 __field(size_t, page_index)
315 __entry->page_index = page_index;
318 __entry->proc, __entry->page_index)
322 TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
323 TP_ARGS(alloc, page_index));
326 TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
327 TP_ARGS(alloc, page_index));
330 TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
[all …]
binder_alloc_selftest.c:98 int page_index; in check_buffer_pages_allocated() local
103 page_index = (page_addr - alloc->buffer) / PAGE_SIZE; in check_buffer_pages_allocated()
104 if (!alloc->pages[page_index].page_ptr || in check_buffer_pages_allocated()
105 !list_empty(&alloc->pages[page_index].lru)) { in check_buffer_pages_allocated()
107 alloc->pages[page_index].page_ptr ? in check_buffer_pages_allocated()
108 "lru" : "free", page_index); in check_buffer_pages_allocated()
/Linux-v5.4/sound/firewire/
packets-buffer.c:26 unsigned int i, page_index, offset_in_page; in iso_packets_buffer_init() local
50 page_index = i / packets_per_page; in iso_packets_buffer_init()
51 p = page_address(b->iso_buffer.pages[page_index]); in iso_packets_buffer_init()
54 b->packets[i].offset = page_index * PAGE_SIZE + offset_in_page; in iso_packets_buffer_init()
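
iso_packets_buffer_init() packs several fixed-size packets into each page and converts a packet number into a page index plus an offset within the whole buffer. A standalone sketch of that mapping, with a hypothetical packet size and a 4 KiB page:

#include <stdio.h>

#define PAGE_SIZE 4096U /* illustrative */

int main(void)
{
        unsigned int packet_size      = 1024; /* hypothetical per-packet buffer size */
        unsigned int packets_per_page = PAGE_SIZE / packet_size;

        for (unsigned int i = 0; i < 8; i++) {
                unsigned int page_index     = i / packets_per_page;
                unsigned int offset_in_page = (i % packets_per_page) * packet_size;
                unsigned int offset         = page_index * PAGE_SIZE + offset_in_page;

                printf("packet %u -> page %u, offset %u\n", i, page_index, offset);
        }
        return 0;
}
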
/Linux-v5.4/drivers/s390/block/
xpram.c:162 unsigned int page_index, add_bit; in xpram_highest_page_index() local
169 page_index = 0; in xpram_highest_page_index()
172 if (xpram_page_in(mem_page, page_index | add_bit) == 0) in xpram_highest_page_index()
173 page_index |= add_bit; in xpram_highest_page_index()
179 return page_index; in xpram_highest_page_index()
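
xpram_highest_page_index() discovers the device size by probing page indices bit by bit: each bit of page_index is kept only if a test read at that index succeeds. A sketch of the same monotone search against a simulated device limit (the starting bit and the probe function are assumptions here):

#include <stdio.h>

/* hypothetical device: reads succeed only below this page count */
static unsigned int device_pages = 12345;

static int xpram_page_in_sim(unsigned int page_index)
{
        return page_index < device_pages ? 0 : -1;
}

int main(void)
{
        unsigned int page_index = 0, add_bit;

        /* same idea as xpram_highest_page_index(): binary probe, high bit first */
        for (add_bit = 1U << 30; add_bit > 0; add_bit >>= 1) {
                if (xpram_page_in_sim(page_index | add_bit) == 0)
                        page_index |= add_bit;
        }

        printf("highest usable page_index = %u\n", page_index); /* prints 12344 */
        return 0;
}
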
/Linux-v5.4/drivers/infiniband/core/
umem_odp.c:504 int page_index, in ib_umem_odp_map_dma_single_page() argument
523 if (!(umem_odp->dma_list[page_index])) { in ib_umem_odp_map_dma_single_page()
531 umem_odp->dma_list[page_index] = dma_addr | access_mask; in ib_umem_odp_map_dma_single_page()
532 umem_odp->page_list[page_index] = page; in ib_umem_odp_map_dma_single_page()
534 } else if (umem_odp->page_list[page_index] == page) { in ib_umem_odp_map_dma_single_page()
535 umem_odp->dma_list[page_index] |= access_mask; in ib_umem_odp_map_dma_single_page()
538 umem_odp->page_list[page_index], page); in ib_umem_odp_map_dma_single_page()
552 (page_index << umem_odp->page_shift), in ib_umem_odp_map_dma_single_page()
554 ((page_index + 1) << umem_odp->page_shift)); in ib_umem_odp_map_dma_single_page()
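
ib_umem_odp_map_dma_single_page() keeps per-page arrays indexed by page_index and, on a failed mapping, converts the index back into a byte range using the region's page_shift. A sketch of that index-to-range conversion, assuming a 4 KiB page shift and a made-up region start:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int page_shift = 12;       /* illustrative; umem_odp->page_shift in the kernel */
        uint64_t umem_start     = 0x100000; /* hypothetical start of the registered region */
        int page_index          = 5;

        /* byte range covered by one page of the region, as in the invalidation call */
        uint64_t range_start = umem_start + ((uint64_t)page_index << page_shift);
        uint64_t range_end   = umem_start + ((uint64_t)(page_index + 1) << page_shift);

        printf("page %d covers [0x%llx, 0x%llx)\n", page_index,
               (unsigned long long)range_start, (unsigned long long)range_end);
        return 0;
}
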
/Linux-v5.4/drivers/gpu/drm/vc4/
vc4_bo.c:194 uint32_t page_index = bo_page_index(size); in vc4_get_cache_list_for_size() local
196 if (vc4->bo_cache.size_list_size <= page_index) { in vc4_get_cache_list_for_size()
198 page_index + 1); in vc4_get_cache_list_for_size()
228 return &vc4->bo_cache.size_list[page_index]; in vc4_get_cache_list_for_size()
353 uint32_t page_index = bo_page_index(size); in vc4_bo_get_from_cache() local
359 if (page_index >= vc4->bo_cache.size_list_size) in vc4_bo_get_from_cache()
362 if (list_empty(&vc4->bo_cache.size_list[page_index])) in vc4_bo_get_from_cache()
365 bo = list_first_entry(&vc4->bo_cache.size_list[page_index], in vc4_bo_get_from_cache()
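
The VC4 BO cache keeps one free list per buffer size, indexed by the buffer's size in pages, and grows the array of lists when a new size is seen. A sketch of the bucket lookup; bo_page_index() is written out here as a plain size-in-pages guess, which may not match the real helper's exact bias:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096U /* illustrative */

/* assumed behaviour of bo_page_index(): buffer size expressed in whole pages */
static uint32_t bo_page_index(size_t size)
{
        return (uint32_t)(size / PAGE_SIZE);
}

int main(void)
{
        size_t size_list_size = 4;             /* hypothetical current length of the bucket array */
        size_t size           = 6 * PAGE_SIZE; /* requested BO size */

        uint32_t page_index = bo_page_index(size);

        if (page_index >= size_list_size)
                printf("bucket %u missing: array must grow to %u entries\n",
                       (unsigned)page_index, (unsigned)page_index + 1);
        else
                printf("reuse cached BOs from bucket %u\n", (unsigned)page_index);
        return 0;
}
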
/Linux-v5.4/drivers/staging/gasket/
gasket_page_table.c:692 int is_simple, uint page_index, in gasket_components_to_dev_address() argument
695 ulong dev_addr = (page_index << GASKET_SIMPLE_PAGE_SHIFT) | offset; in gasket_components_to_dev_address()
711 ulong page_index = in gasket_is_simple_dev_addr_bad() local
714 if (gasket_components_to_dev_address(pg_tbl, 1, page_index, in gasket_is_simple_dev_addr_bad()
721 if (page_index >= pg_tbl->num_simple_entries) { in gasket_is_simple_dev_addr_bad()
724 page_index, pg_tbl->num_simple_entries); in gasket_is_simple_dev_addr_bad()
728 if (page_index + num_pages > pg_tbl->num_simple_entries) { in gasket_is_simple_dev_addr_bad()
731 page_index + num_pages, pg_tbl->num_simple_entries); in gasket_is_simple_dev_addr_bad()
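
gasket_components_to_dev_address() builds a device address by shifting the page index and OR-ing in the in-page offset, and gasket_is_simple_dev_addr_bad() rejects indices or index ranges that fall outside the simple page table. A sketch of the compose step and the two bounds checks, with a made-up page shift and table size:

#include <stdio.h>

#define SIMPLE_PAGE_SHIFT 12 /* hypothetical stand-in for GASKET_SIMPLE_PAGE_SHIFT */

int main(void)
{
        unsigned long num_simple_entries = 256; /* hypothetical table size */
        unsigned long page_index = 250, num_pages = 10, offset = 0x80;

        /* compose a device address from (page index, offset), as in the driver */
        unsigned long dev_addr = (page_index << SIMPLE_PAGE_SHIFT) | offset;
        printf("dev_addr = 0x%lx\n", dev_addr);

        /* the same two range checks the driver applies to simple addresses */
        if (page_index >= num_simple_entries)
                printf("bad: index %lu beyond table of %lu entries\n",
                       page_index, num_simple_entries);
        if (page_index + num_pages > num_simple_entries)
                printf("bad: mapping of %lu pages overruns the table\n", num_pages);
        return 0;
}
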
/Linux-v5.4/fs/ecryptfs/
read_write.c:245 pgoff_t page_index, in ecryptfs_read_lower_page_segment() argument
253 offset = ((((loff_t)page_index) << PAGE_SHIFT) + offset_in_page); in ecryptfs_read_lower_page_segment()
crypto.c:396 pgoff_t page_index = op == ENCRYPT ? src_page->index : dst_page->index; in crypt_extent() local
403 extent_base = (((loff_t)page_index) * (PAGE_SIZE / extent_size)); in crypt_extent()
426 "rc = [%d]\n", __func__, page_index, extent_offset, rc); in crypt_extent()
/Linux-v5.4/include/linux/qed/
qed_chain.h:265 u32 page_index = 0; in qed_chain_advance_page() local
284 page_index = *(u16 *)page_to_inc; in qed_chain_advance_page()
288 page_index = *(u32 *)page_to_inc; in qed_chain_advance_page()
290 *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index]; in qed_chain_advance_page()
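
qed_chain_advance_page() keeps the page counter in either a u16 or a u32 depending on how the chain is configured, so the index is read through a cast of the matching width before it is used to look up the page's virtual address. A sketch of that width-dependent read against a hypothetical page table:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        void *page_table[4] = { (void *)0x1000, (void *)0x2000,
                                (void *)0x3000, (void *)0x4000 };
        uint16_t prod_u16  = 2; /* counter as kept by a 16-bit chain */
        uint32_t prod_u32  = 3; /* counter as kept by a 32-bit chain */
        int chain_uses_u32 = 0; /* hypothetical chain mode flag */

        void *page_to_inc = chain_uses_u32 ? (void *)&prod_u32 : (void *)&prod_u16;
        uint32_t page_index;

        /* read the counter through the width that matches the chain mode */
        if (chain_uses_u32)
                page_index = *(uint32_t *)page_to_inc;
        else
                page_index = *(uint16_t *)page_to_inc;

        printf("next element lives in page %u at %p\n",
               (unsigned)page_index, page_table[page_index]);
        return 0;
}
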
/Linux-v5.4/fs/jfs/
jfs_metapage.c:584 unsigned long page_index; in __get_metapage() local
592 page_index = lblock >> l2BlocksPerPage; in __get_metapage()
593 page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize; in __get_metapage()
614 page = grab_cache_page(mapping, page_index); in __get_metapage()
621 page = read_mapping_page(mapping, page_index, NULL); in __get_metapage()
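
__get_metapage() locates the page cache page that holds a metadata block: the block number is shifted down by log2(blocks per page) to get page_index, and the remainder, scaled by the block size, gives the byte offset inside that page. A worked sketch with 4 KiB pages and a hypothetical 1 KiB block size:

#include <stdio.h>

int main(void)
{
        unsigned long lblock = 13; /* logical metadata block number */
        int l2bsize          = 10; /* hypothetical: 1 KiB blocks */
        int l2BlocksPerPage  = 2;  /* 4 KiB page / 1 KiB block = 4 blocks per page */

        unsigned long page_index  = lblock >> l2BlocksPerPage;
        unsigned long page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;

        /* block 13 lives in page 3 at byte offset 1024 */
        printf("page_index = %lu, page_offset = %lu\n", page_index, page_offset);
        return 0;
}
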
/Linux-v5.4/fs/xfs/
xfs_buf.c:1267 int page_index; in xfs_buf_ioapply_map() local
1276 page_index = 0; in xfs_buf_ioapply_map()
1279 page_index++; in xfs_buf_ioapply_map()
1302 for (; size && nr_pages; nr_pages--, page_index++) { in xfs_buf_ioapply_map()
1308 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, in xfs_buf_ioapply_map()
1531 int page_index, page_offset, csize; in xfs_buf_zero() local
1533 page_index = (boff + bp->b_offset) >> PAGE_SHIFT; in xfs_buf_zero()
1535 page = bp->b_pages[page_index]; in xfs_buf_zero()
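
xfs_buf_zero() translates a byte offset within a buffer into the index of the backing page, accounting for the fact that the buffer's data may start at a non-zero offset in its first page. A sketch of that translation, assuming 4 KiB pages and made-up offsets:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1U << PAGE_SHIFT) /* illustrative */

int main(void)
{
        unsigned int b_offset = 512;  /* hypothetical data offset within the first page */
        unsigned int boff     = 9000; /* byte offset within the buffer to zero */

        unsigned int page_index  = (boff + b_offset) >> PAGE_SHIFT;
        unsigned int page_offset = (boff + b_offset) & (PAGE_SIZE - 1);

        printf("byte %u -> page %u, offset %u\n", boff, page_index, page_offset);
        return 0;
}
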
/Linux-v5.4/drivers/misc/vmw_vmci/
vmci_queue_pair.c:338 const u64 page_index = in qp_memcpy_to_queue_iter() local
346 va = kmap(kernel_if->u.h.page[page_index]); in qp_memcpy_to_queue_iter()
348 va = kernel_if->u.g.vas[page_index + 1]; in qp_memcpy_to_queue_iter()
360 kunmap(kernel_if->u.h.page[page_index]); in qp_memcpy_to_queue_iter()
365 kunmap(kernel_if->u.h.page[page_index]); in qp_memcpy_to_queue_iter()
385 const u64 page_index = in qp_memcpy_from_queue_iter() local
394 va = kmap(kernel_if->u.h.page[page_index]); in qp_memcpy_from_queue_iter()
396 va = kernel_if->u.g.vas[page_index + 1]; in qp_memcpy_from_queue_iter()
408 kunmap(kernel_if->u.h.page[page_index]); in qp_memcpy_from_queue_iter()
413 kunmap(kernel_if->u.h.page[page_index]); in qp_memcpy_from_queue_iter()
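
The VMCI queue-pair copy helpers walk the queue one page at a time: each iteration derives page_index from the current byte position, maps that page (kmap for host pages, a pre-mapped VA table for guest pages), copies up to a page worth of data, and unmaps. A userspace sketch of the per-iteration index arithmetic, assuming the byte position already includes whatever header the real queue prepends:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL /* illustrative */

int main(void)
{
        uint64_t queue_offset = 6000; /* hypothetical current position in the queue */
        uint64_t size         = 9000; /* bytes left to copy */

        while (size > 0) {
                uint64_t page_index  = queue_offset / PAGE_SIZE;
                uint64_t page_offset = queue_offset % PAGE_SIZE;
                uint64_t chunk       = PAGE_SIZE - page_offset;

                if (chunk > size)
                        chunk = size;
                /* in the kernel this is where page[page_index] is kmap()ed and copied */
                printf("copy %llu bytes from page %llu at offset %llu\n",
                       (unsigned long long)chunk, (unsigned long long)page_index,
                       (unsigned long long)page_offset);

                queue_offset += chunk;
                size -= chunk;
        }
        return 0;
}
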
/Linux-v5.4/fs/btrfs/
scrub.c:1202 int page_index; in scrub_handle_errored_block() local
1204 for (page_index = 0; page_index < sblock->page_count; in scrub_handle_errored_block()
1205 page_index++) { in scrub_handle_errored_block()
1206 sblock->pagev[page_index]->sblock = NULL; in scrub_handle_errored_block()
1207 recover = sblock->pagev[page_index]->recover; in scrub_handle_errored_block()
1210 sblock->pagev[page_index]->recover = in scrub_handle_errored_block()
1213 scrub_page_put(sblock->pagev[page_index]); in scrub_handle_errored_block()
1282 int page_index = 0; in scrub_setup_recheck_block() local
1322 BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK); in scrub_setup_recheck_block()
1344 sblock->pagev[page_index] = page; in scrub_setup_recheck_block()
[all …]
raid56.c:1086 unsigned long page_index, in rbio_add_io_page() argument
1097 disk_start = stripe->physical + (page_index << PAGE_SHIFT); in rbio_add_io_page()
1163 unsigned long page_index; in index_rbio_pages() local
1173 page_index = stripe_offset >> PAGE_SHIFT; in index_rbio_pages()
1179 rbio->bio_pages[page_index + i] = bvec.bv_page; in index_rbio_pages()
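
In the RAID5/6 code, rbio_add_io_page() turns a page index within a stripe into a physical disk byte address, and index_rbio_pages() does the inverse, turning a byte offset within the stripe into a page index. A sketch of that round trip, assuming 4 KiB pages and a hypothetical stripe start:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12 /* illustrative 4 KiB pages */

int main(void)
{
        uint64_t stripe_physical = 0x40000000; /* hypothetical start of the stripe on disk */
        unsigned long page_index = 5;

        /* page index -> disk byte address, as in rbio_add_io_page() */
        uint64_t disk_start = stripe_physical + ((uint64_t)page_index << PAGE_SHIFT);

        /* byte offset within the stripe -> page index, as in index_rbio_pages() */
        uint64_t stripe_offset = disk_start - stripe_physical;
        unsigned long back     = stripe_offset >> PAGE_SHIFT;

        printf("disk_start = 0x%llx, page_index back = %lu\n",
               (unsigned long long)disk_start, back);
        return 0;
}
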
/Linux-v5.4/fs/nilfs2/
btnode.c:132 pgoff_t index = page_index(page); in nilfs_btnode_delete()
bmap.c:448 key = page_index(bh->b_page) << (PAGE_SHIFT - in nilfs_bmap_data_get_key()
page.c:468 __xa_clear_mark(&mapping->i_pages, page_index(page), in __nilfs_clear_page_dirty()
/Linux-v5.4/fs/nfs/
dir.c:152 unsigned long page_index; member
325 desc->page_index++; in nfs_readdir_search_array()
701 return read_cache_page(desc->file->f_mapping, desc->page_index, in get_cache_page()
729 if (desc->page_index == 0) { in readdir_search_pagecache()
808 desc->page_index = 0; in uncached_readdir()
881 desc->page_index = 0; in nfs_readdir()
read.c:317 page, PAGE_SIZE, page_index(page)); in nfs_readpage()
/Linux-v5.4/include/linux/ceph/
messenger.h:200 unsigned short page_index; /* index in array */ member
/Linux-v5.4/drivers/misc/mic/scif/
scif_rma.c:995 s64 page_index; in scif_get_window_offset() local
1000 page_index = SCIF_IOVA_PFN(offset); in scif_get_window_offset()
1001 iova_ptr = reserve_iova(&ep->rma_info.iovad, page_index, in scif_get_window_offset()
1002 page_index + num_pages - 1); in scif_get_window_offset()
/Linux-v5.4/include/linux/
pagemap.h:441 return ((loff_t)page_index(page)) << PAGE_SHIFT; in page_file_offset()
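
page_file_offset() in pagemap.h is the canonical conversion most of the other hits build on: a page cache index becomes a byte offset in the file by shifting left by PAGE_SHIFT, widened to loff_t first so large files do not overflow a 32-bit index. A sketch of the conversion and of what the cast prevents, assuming 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12 /* illustrative 4 KiB pages */

int main(void)
{
        uint32_t page_index = 0x00200000; /* page 2M: byte offset 8 GiB, past 32 bits */

        /* widen before shifting, as page_file_offset() does with its loff_t cast */
        int64_t offset = (int64_t)page_index << PAGE_SHIFT;

        /* shifting the 32-bit index directly wraps to 0 for this value */
        uint32_t truncated = page_index << PAGE_SHIFT;

        printf("offset = %lld, truncated 32-bit shift = %u\n",
               (long long)offset, (unsigned)truncated);
        return 0;
}
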
/Linux-v5.4/fs/ocfs2/
refcounttree.c:2912 pgoff_t page_index; in ocfs2_duplicate_clusters_by_page() local
2930 page_index = offset >> PAGE_SHIFT; in ocfs2_duplicate_clusters_by_page()
2931 map_end = ((loff_t)page_index + 1) << PAGE_SHIFT; in ocfs2_duplicate_clusters_by_page()
2942 page = find_or_create_page(mapping, page_index, GFP_NOFS); in ocfs2_duplicate_clusters_by_page()
3149 pgoff_t page_index; in ocfs2_cow_sync_writeback() local
3166 page_index = offset >> PAGE_SHIFT; in ocfs2_cow_sync_writeback()
3167 map_end = ((loff_t)page_index + 1) << PAGE_SHIFT; in ocfs2_cow_sync_writeback()
3172 page_index, GFP_NOFS); in ocfs2_cow_sync_writeback()
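
Both ocfs2 hits iterate over a byte range page by page: page_index picks the page that contains the current offset, and map_end is the byte offset just past that page, which bounds how much is processed in one iteration. A sketch of that loop skeleton, assuming 4 KiB pages and made-up range bounds:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12 /* illustrative 4 KiB pages */

int main(void)
{
        int64_t offset = 5000;  /* start of the range, in bytes */
        int64_t end    = 13000; /* end of the range, exclusive */

        while (offset < end) {
                uint64_t page_index = (uint64_t)offset >> PAGE_SHIFT;
                int64_t map_end     = ((int64_t)page_index + 1) << PAGE_SHIFT;

                if (map_end > end)
                        map_end = end;
                /* in the kernel, find_or_create_page(mapping, page_index, ...) goes here */
                printf("page %llu: bytes [%lld, %lld)\n",
                       (unsigned long long)page_index, (long long)offset, (long long)map_end);

                offset = map_end;
        }
        return 0;
}
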
/Linux-v5.4/mm/
page-writeback.c:2481 __xa_set_mark(&mapping->i_pages, page_index(page), in __set_page_dirty_nobuffers()
2731 __xa_clear_mark(&mapping->i_pages, page_index(page), in test_clear_page_writeback()
2771 XA_STATE(xas, &mapping->i_pages, page_index(page)); in __test_set_page_writeback()
