/Linux-v5.4/drivers/net/ethernet/cavium/liquidio/ |
D | octeon_network.h |
    298  pg_info->page_offset = 0;  in recv_buffer_alloc()
    300  skb_pg_info->page_offset = 0;  in recv_buffer_alloc()
    324  skb_pg_info->page_offset = 0;  in recv_buffer_fast_alloc()
    348  pg_info->page_offset = 0;  in recv_buffer_recycle()
    353  if (pg_info->page_offset == 0)  in recv_buffer_recycle()
    354  pg_info->page_offset = LIO_RXBUFFER_SZ;  in recv_buffer_recycle()
    356  pg_info->page_offset = 0;  in recv_buffer_recycle()
    384  skb_pg_info->page_offset = pg_info->page_offset;  in recv_buffer_reuse()
    398  pg_info->page_offset = 0;  in recv_buffer_destroy()
    415  pg_info->page_offset = 0;  in recv_buffer_free()
    [all …]
|
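The liquidio matches trace a simple recycling scheme: `page_offset` parks a receive buffer at one of two fixed slots in a page (0 or LIO_RXBUFFER_SZ), and `recv_buffer_recycle()` flips between them. A minimal userspace model of that flip; `RXBUFFER_SZ` and the struct are illustrative stand-ins, not the driver's definitions:

```c
/* Model of the two-slots-per-page recycle seen above: a page is split
 * into two RX buffers, and page_offset alternates between 0 and
 * RXBUFFER_SZ each time the page is recycled. */
#include <stdio.h>

#define PAGE_SIZE    4096u
#define RXBUFFER_SZ  (PAGE_SIZE / 2)   /* assumed: two buffers per page */

struct pg_info {
	unsigned int page_offset;      /* 0 or RXBUFFER_SZ */
};

/* Flip to the other half of the page for the next RX buffer. */
static void recycle(struct pg_info *pg)
{
	if (pg->page_offset == 0)
		pg->page_offset = RXBUFFER_SZ;
	else
		pg->page_offset = 0;
}

int main(void)
{
	struct pg_info pg = { .page_offset = 0 };

	for (int i = 0; i < 4; i++) {
		recycle(&pg);
		printf("buffer %d starts at offset %u\n", i, pg.page_offset);
	}
	return 0;
}
```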
/Linux-v5.4/drivers/scsi/fnic/ |
D | fnic_trace.c |
    71   fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];  in fnic_trace_get_buf()
    124  fnic_trace_entries.page_offset[rd_idx];  in fnic_get_trace_data()
    166  fnic_trace_entries.page_offset[rd_idx];  in fnic_get_trace_data()
    490  fnic_trace_entries.page_offset =  in fnic_trace_buf_init()
    493  if (!fnic_trace_entries.page_offset) {  in fnic_trace_buf_init()
    503  memset((void *)fnic_trace_entries.page_offset, 0,  in fnic_trace_buf_init()
    514  fnic_trace_entries.page_offset[i] = fnic_buf_head;  in fnic_trace_buf_init()
    532  if (fnic_trace_entries.page_offset) {  in fnic_trace_free()
    533  vfree((void *)fnic_trace_entries.page_offset);  in fnic_trace_free()
    534  fnic_trace_entries.page_offset = NULL;  in fnic_trace_free()
    [all …]
|
/Linux-v5.4/drivers/gpu/drm/ttm/ |
D | ttm_bo_vm.c |
    98   unsigned long page_offset)  in ttm_bo_io_mem_pfn() argument
    103  return bdev->driver->io_mem_pfn(bo, page_offset);  in ttm_bo_io_mem_pfn()
    106  + page_offset;  in ttm_bo_io_mem_pfn()
    115  unsigned long page_offset;  in ttm_bo_vm_fault() local
    213  page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +  in ttm_bo_vm_fault()
    218  if (unlikely(page_offset >= bo->num_pages)) {  in ttm_bo_vm_fault()
    261  pfn = ttm_bo_io_mem_pfn(bo, page_offset);  in ttm_bo_vm_fault()
    263  page = ttm->pages[page_offset];  in ttm_bo_vm_fault()
    271  page_offset;  in ttm_bo_vm_fault()
    290  if (unlikely(++page_offset >= page_last))  in ttm_bo_vm_fault()
|
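`ttm_bo_vm_fault()` converts the faulting CPU address into a page index within the buffer object, then bounds-checks it against `bo->num_pages`. Line 213's continuation is truncated in the listing, so the sketch below assumes the usual DRM form (pages into the VMA, plus the VMA's `vm_pgoff`, minus the object's start in the fake mmap offset space); a model, not the TTM code:

```c
/* Userspace model of the fault-offset arithmetic in ttm_bo_vm_fault(). */
#include <stdio.h>

#define PAGE_SHIFT 12

static long fault_page_offset(unsigned long address,   /* faulting address */
			      unsigned long vm_start,   /* vma->vm_start */
			      unsigned long vm_pgoff,   /* vma->vm_pgoff */
			      unsigned long node_start, /* object base, pages */
			      unsigned long num_pages)  /* bo->num_pages */
{
	unsigned long page_offset;

	page_offset = ((address - vm_start) >> PAGE_SHIFT)
		      + vm_pgoff - node_start;

	if (page_offset >= num_pages)
		return -1;              /* kernel: VM_FAULT_SIGBUS */
	return (long)page_offset;
}

int main(void)
{
	/* Fault 3 pages into a VMA that maps the object from its start. */
	printf("page_offset = %ld\n",
	       fault_page_offset(0x7f0000003000ul, 0x7f0000000000ul,
				 0x100000ul, 0x100000ul, 8));
	return 0;
}
```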
/Linux-v5.4/drivers/gpu/drm/vkms/ |
D | vkms_gem.c |
    48  pgoff_t page_offset;  in vkms_gem_fault() local
    52  page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;  in vkms_gem_fault()
    55  if (page_offset > num_pages)  in vkms_gem_fault()
    60  get_page(obj->pages[page_offset]);  in vkms_gem_fault()
    61  vmf->page = obj->pages[page_offset];  in vkms_gem_fault()
    70  page = shmem_read_mapping_page(mapping, page_offset);  in vkms_gem_fault()
|
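The vkms handler (and vgem, udl, and gma500 further down) shows the simplest GEM fault shape: index the object's page array by the fault offset, bounds-check, take a page reference, and return the page through `vmf->page`. A self-contained model with `struct page` and `get_page()` stubbed:

```c
/* Model of the vkms/vgem fault pattern: translate the faulting address
 * into an index into the object's page array, bounds check, then hand
 * the referenced page back to the caller. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12

struct page { int refcount; };

static void get_page(struct page *p) { p->refcount++; }

struct gem_object {
	struct page **pages;
	size_t num_pages;
};

/* Returns the faulted page, or NULL for SIGBUS. */
static struct page *gem_fault(struct gem_object *obj,
			      unsigned long vaddr, unsigned long vm_start)
{
	size_t page_offset = (vaddr - vm_start) >> PAGE_SHIFT;

	if (page_offset >= obj->num_pages)
		return NULL;                 /* VM_FAULT_SIGBUS */

	get_page(obj->pages[page_offset]);   /* reference held by the PTE */
	return obj->pages[page_offset];
}

int main(void)
{
	struct page pg[2] = { {1}, {1} };
	struct page *arr[2] = { &pg[0], &pg[1] };
	struct gem_object obj = { arr, 2 };

	struct page *hit = gem_fault(&obj, 0x400000 + 0x1000, 0x400000);
	printf("faulted page refcount: %d\n", hit ? hit->refcount : -1);
	return 0;
}
```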
/Linux-v5.4/fs/orangefs/ |
D | inode.c |
    42   off = page_offset(page);  in orangefs_writepage_locked()
    110  ow->bv[i].bv_len = min(page_offset(ow->pages[i]) + PAGE_SIZE,  in orangefs_writepages_work()
    112  max(ow->off, page_offset(ow->pages[i]));  in orangefs_writepages_work()
    115  page_offset(ow->pages[i]);  in orangefs_writepages_work()
    306  off = page_offset(page);  in orangefs_readpage()
    466  if (pos == page_offset(page) &&  in orangefs_write_end()
    495  } else if (page_offset(page) + offset <= wr->pos &&  in orangefs_invalidatepage()
    496  wr->pos + wr->len <= page_offset(page) + offset + length) {  in orangefs_invalidatepage()
    505  } else if (wr->pos < page_offset(page) + offset &&  in orangefs_invalidatepage()
    506  wr->pos + wr->len <= page_offset(page) + offset + length &&  in orangefs_invalidatepage()
    [all …]
|
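Unlike the driver-private fields elsewhere in this listing, orangefs calls the pagecache helper `page_offset()` from `<linux/pagemap.h>`, which converts a page's index within a file into a byte offset. A model of that helper and of how writeback uses it to pick the write position:

```c
/* Model of the pagecache helper page_offset(): a page's byte position
 * in the file is its index shifted left by PAGE_SHIFT. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ul << PAGE_SHIFT)

struct page { unsigned long index; /* page number within the file */ };

static int64_t page_offset(const struct page *page)
{
	return (int64_t)page->index << PAGE_SHIFT;
}

int main(void)
{
	struct page page = { .index = 3 };

	/* orangefs_writepage_locked() starts its write at this offset. */
	printf("page 3 covers bytes [%lld, %lld)\n",
	       (long long)page_offset(&page),
	       (long long)(page_offset(&page) + PAGE_SIZE));
	return 0;
}
```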
/Linux-v5.4/scripts/ |
D | leaking_addresses.pl |
    304  state $page_offset = get_page_offset();
    310  if (hex($match) < $page_offset) {
    320  my $page_offset;
    328  $page_offset = get_kernel_config_option('CONFIG_PAGE_OFFSET');
    329  if (!$page_offset) {
    332  return $page_offset;
|
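Here `page_offset` is not a struct field at all: the script reads `CONFIG_PAGE_OFFSET`, the virtual address where kernel mappings begin, and ignores any hex match below it. The same test modeled in C (the script itself is Perl; 0xc0000000 is the classic 32-bit x86 value, used here only as an assumed example):

```c
/* An address below PAGE_OFFSET cannot be a kernel linear-map address,
 * so the scanner does not treat it as a leak. The real boundary is
 * architecture- and config-specific. */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_OFFSET 0xc0000000ul   /* assumed example value */

static bool may_leak_kernel_address(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

int main(void)
{
	printf("0xb7001234: %s\n", may_leak_kernel_address(0xb7001234ul)
	       ? "possible kernel address" : "userspace, ignore");
	printf("0xc1000000: %s\n", may_leak_kernel_address(0xc1000000ul)
	       ? "possible kernel address" : "userspace, ignore");
	return 0;
}
```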
/Linux-v5.4/fs/hfs/ |
D | bnode.c |
    23   off += node->page_offset;  in hfs_bnode_read()
    65   off += node->page_offset;  in hfs_bnode_write()
    90   off += node->page_offset;  in hfs_bnode_clear()
    106  src += src_node->page_offset;  in hfs_bnode_copy()
    107  dst += dst_node->page_offset;  in hfs_bnode_copy()
    125  src += node->page_offset;  in hfs_bnode_move()
    126  dst += node->page_offset;  in hfs_bnode_move()
    279  node->page_offset = off & ~PAGE_MASK;  in __hfs_bnode_create()
    338  desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);  in hfs_bnode_find()
    428  memset(kmap(*pagep) + node->page_offset, 0,  in hfs_bnode_create()
|
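hfs stores a btree node's position as a page array plus an in-page `page_offset`, computed with `off & ~PAGE_MASK`; every read and write then adds it back before touching the kmapped page. The mask trick is easy to misread, because the kernel's `PAGE_MASK` clears the low bits, so `~PAGE_MASK` keeps them:

```c
/* Model of __hfs_bnode_create()'s arithmetic: a node's byte offset in
 * the tree file splits into a page index (off >> PAGE_SHIFT) and an
 * in-page remainder (off & ~PAGE_MASK). */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ul << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long off = 0x3456;              /* node offset in the tree */
	unsigned long page_idx    = off >> PAGE_SHIFT;
	unsigned long page_offset = off & ~PAGE_MASK;

	printf("offset 0x%lx -> page %lu, page_offset 0x%lx\n",
	       off, page_idx, page_offset);      /* page 3, offset 0x456 */
	return 0;
}
```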
/Linux-v5.4/drivers/gpu/drm/i915/gem/ |
D | i915_gem_mman.c |
    178  pgoff_t page_offset,  in compute_partial_view() argument
    187  view.partial.offset = rounddown(page_offset, chunk);  in compute_partial_view()
    229  pgoff_t page_offset;  in i915_gem_fault() local
    238  page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;  in i915_gem_fault()
    240  trace_i915_gem_object_fault(obj, page_offset, true, write);  in i915_gem_fault()
    270  compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);  in i915_gem_fault()
|
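When an object is too large to bind in one piece, i915 builds a partial view around the faulting page; `rounddown(page_offset, chunk)` aligns the window so nearby faults reuse the same view. A model of that computation (`MIN_CHUNK_PAGES` and the field names are stand-ins for the i915 definitions):

```c
/* Model of compute_partial_view(): pick a chunk-aligned window of the
 * object that contains the faulting page, clamped at the object end. */
#include <stdio.h>

#define MIN_CHUNK_PAGES 64   /* assumed window size, in pages */

struct partial_view {
	unsigned long offset;   /* first page of the window */
	unsigned long size;     /* pages in the window */
};

static unsigned long rounddown(unsigned long x, unsigned long y)
{
	return x - (x % y);
}

static struct partial_view compute_partial_view(unsigned long obj_pages,
						unsigned long page_offset,
						unsigned long chunk)
{
	struct partial_view view;

	view.offset = rounddown(page_offset, chunk);
	view.size = chunk < obj_pages - view.offset
		    ? chunk : obj_pages - view.offset;
	return view;
}

int main(void)
{
	/* Fault at page 100 of a 1000-page object. */
	struct partial_view v = compute_partial_view(1000, 100,
						     MIN_CHUNK_PAGES);

	printf("map pages [%lu, %lu)\n", v.offset, v.offset + v.size);
	return 0;
}
```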
/Linux-v5.4/drivers/mtd/tests/ |
D | nandbiterrs.c |
    45   static unsigned page_offset;  variable
    46   module_param(page_offset, uint, S_IRUGO);
    47   MODULE_PARM_DESC(page_offset, "Page number relative to dev start");
    358  offset = (loff_t)page_offset * mtd->writesize;  in mtd_nandbiterrs_init()
    362  page_offset, offset, eraseblock);  in mtd_nandbiterrs_init()
|
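In the NAND bit-error test, `page_offset` is a module parameter counted in NAND pages; init converts it to a byte offset by multiplying with `mtd->writesize` (the NAND page size). A model of the conversion, with typical NAND geometry assumed:

```c
/* Model of the offset math in mtd_nandbiterrs_init(): byte offset is
 * page_offset * writesize; the containing erase block follows from
 * dividing by erasesize. 2048/131072 are assumed typical values. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int page_offset = 68;   /* pages from device start */
	uint32_t writesize = 2048;       /* mtd->writesize */
	uint32_t erasesize = 131072;     /* mtd->erasesize */

	int64_t offset = (int64_t)page_offset * writesize;

	printf("page %u -> byte offset %lld (erase block %lld)\n",
	       page_offset, (long long)offset,
	       (long long)(offset / erasesize));
	return 0;
}
```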
/Linux-v5.4/drivers/gpu/drm/gma500/ |
D | gem.c |
    134  pgoff_t page_offset;  in psb_gem_fault() local
    162  page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;  in psb_gem_fault()
    168  pfn = page_to_pfn(r->pages[page_offset]);  in psb_gem_fault()
|
/Linux-v5.4/drivers/nvmem/ |
D | rave-sp-eeprom.c |
    161  const unsigned int page_offset = offset % RAVE_SP_EEPROM_PAGE_SIZE;  in rave_sp_eeprom_page_access() local
    172  if (WARN_ON(data_len > sizeof(page.data) - page_offset))  in rave_sp_eeprom_page_access()
    187  memcpy(&page.data[page_offset], data, data_len);  in rave_sp_eeprom_page_access()
    199  memcpy(data, &page.data[page_offset], data_len);  in rave_sp_eeprom_page_access()
|
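The EEPROM helper clips every transfer to one hardware page: `page_offset = offset % RAVE_SP_EEPROM_PAGE_SIZE`, with at most `PAGE_SIZE - page_offset` bytes per access. A model of the chunking loop a caller runs around it (the 32-byte page size mirrors `RAVE_SP_EEPROM_PAGE_SIZE` only as an assumption):

```c
/* Split an arbitrary (offset, length) EEPROM access into per-page
 * pieces, each of which the page-access helper can handle. */
#include <stdio.h>

#define EEPROM_PAGE_SIZE 32u

int main(void)
{
	unsigned int offset = 50, len = 70;

	while (len) {
		unsigned int page        = offset / EEPROM_PAGE_SIZE;
		unsigned int page_offset = offset % EEPROM_PAGE_SIZE;
		unsigned int chunk       = EEPROM_PAGE_SIZE - page_offset;

		if (chunk > len)
			chunk = len;

		/* real code reads/writes page.data[page_offset..] here */
		printf("page %u, page_offset %2u, %2u bytes\n",
		       page, page_offset, chunk);

		offset += chunk;
		len    -= chunk;
	}
	return 0;
}
```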
/Linux-v5.4/drivers/gpu/drm/qxl/ |
D | qxl_image.c |
    166  unsigned int page_base, page_offset, out_offset;  in qxl_image_init_helper() local
    175  page_offset = offset_in_page(out_offset);  in qxl_image_init_helper()
    176  size = min((int)(PAGE_SIZE - page_offset), remain);  in qxl_image_init_helper()
    179  k_data = ptr + page_offset;  in qxl_image_init_helper()
|
/Linux-v5.4/fs/hfsplus/ |
D | bnode.c |
    27   off += node->page_offset;  in hfs_bnode_read()
    80   off += node->page_offset;  in hfs_bnode_write()
    110  off += node->page_offset;  in hfs_bnode_clear()
    136  src += src_node->page_offset;  in hfs_bnode_copy()
    137  dst += dst_node->page_offset;  in hfs_bnode_copy()
    193  src += node->page_offset;  in hfs_bnode_move()
    194  dst += node->page_offset;  in hfs_bnode_move()
    445  node->page_offset = off & ~PAGE_MASK;  in __hfs_bnode_create()
    506  node->page_offset);  in hfs_bnode_find()
    596  memset(kmap(*pagep) + node->page_offset, 0,  in hfs_bnode_create()
|
D | wrapper.c |
    76  unsigned int page_offset = offset_in_page(buf);  in hfsplus_submit_bio() local
    77  unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,  in hfsplus_submit_bio()
    80  ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);  in hfsplus_submit_bio()
|
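`hfsplus_submit_bio()` feeds a possibly unaligned kernel buffer to `bio_add_page()` one page at a time: `offset_in_page(buf)` supplies the in-page offset, and the first segment is shortened accordingly. A model of that segment walk:

```c
/* Model of hfsplus_submit_bio()'s loop: a possibly unaligned buffer
 * becomes (page, offset, len) triples, where each segment runs at most
 * to the end of its page. In the kernel, offset_in_page(p) is
 * (unsigned long)p & ~PAGE_MASK. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ul

int main(void)
{
	uintptr_t buf = 0x10000f00;   /* pretend kernel virtual address */
	unsigned long remaining = 6000;

	while (remaining) {
		unsigned long page_offset = buf & (PAGE_SIZE - 1);
		unsigned long len = PAGE_SIZE - page_offset;

		if (len > remaining)
			len = remaining;

		/* real code: bio_add_page(bio, virt_to_page(buf), len,
		 *                          page_offset); */
		printf("page @%#lx offset %4lu len %4lu\n",
		       (unsigned long)(buf & ~(PAGE_SIZE - 1)),
		       page_offset, len);

		buf += len;
		remaining -= len;
	}
	return 0;
}
```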
/Linux-v5.4/net/ceph/ |
D | messenger.c |
    527  int page_offset, size_t length)  in ceph_tcp_recvpage() argument
    531  .bv_offset = page_offset,  in ceph_tcp_recvpage()
    537  BUG_ON(page_offset + length > PAGE_SIZE);  in ceph_tcp_recvpage()
    830  size_t *page_offset,  in ceph_msg_data_bio_next() argument
    836  *page_offset = bv.bv_offset;  in ceph_msg_data_bio_next()
    891  size_t *page_offset,  in ceph_msg_data_bvecs_next() argument
    897  *page_offset = bv.bv_offset;  in ceph_msg_data_bvecs_next()
    946  cursor->page_offset = data->alignment & ~PAGE_MASK;  in ceph_msg_data_pages_cursor_init()
    950  BUG_ON(length > SIZE_MAX - cursor->page_offset);  in ceph_msg_data_pages_cursor_init()
    951  cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;  in ceph_msg_data_pages_cursor_init()
    [all …]
|
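The ceph messenger walks message data with a cursor: for a page array, `page_offset` starts at the data's alignment within a page, and `last_piece` asks whether the remainder (`resid`) still fits in the current page. A sketch of cursor init and advance for the page-array case only; the field names follow the snippets above, the rest is a model:

```c
/* Model of the ceph page-array data cursor: emit one piece per page,
 * tracking the in-page offset and what remains of the data item. */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE 4096ul

struct cursor {
	unsigned long page_offset;  /* offset within the current page */
	unsigned long resid;        /* bytes left in the data item */
	bool last_piece;
};

static void cursor_init(struct cursor *c, unsigned long align,
			unsigned long length)
{
	c->page_offset = align & (PAGE_SIZE - 1);
	c->resid = length;
	c->last_piece = c->page_offset + c->resid <= PAGE_SIZE;
}

/* Consume `bytes` (one piece) and move to the next page if needed. */
static void cursor_advance(struct cursor *c, unsigned long bytes)
{
	c->resid -= bytes;
	c->page_offset = (c->page_offset + bytes) & (PAGE_SIZE - 1);
	c->last_piece = c->resid && c->page_offset + c->resid <= PAGE_SIZE;
}

int main(void)
{
	struct cursor c;

	cursor_init(&c, 0x100, 9000);
	while (c.resid) {
		unsigned long piece = PAGE_SIZE - c.page_offset;

		if (piece > c.resid)
			piece = c.resid;
		printf("piece of %4lu bytes at offset %4lu%s\n", piece,
		       c.page_offset, c.last_piece ? " (last)" : "");
		cursor_advance(&c, piece);
	}
	return 0;
}
```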
/Linux-v5.4/drivers/gpu/drm/vgem/ |
D | vgem_drv.c |
    79  pgoff_t page_offset;  in vgem_gem_fault() local
    80  page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;  in vgem_gem_fault()
    84  if (page_offset >= num_pages)  in vgem_gem_fault()
    89  get_page(obj->pages[page_offset]);  in vgem_gem_fault()
    90  vmf->page = obj->pages[page_offset];  in vgem_gem_fault()
    99  page_offset);  in vgem_gem_fault()
|
/Linux-v5.4/drivers/net/ethernet/intel/iavf/ |
D | iavf_txrx.c |
    681   rx_bi->page_offset,  in iavf_clean_rx_ring()
    694   rx_bi->page_offset = 0;  in iavf_clean_rx_ring()
    846   bi->page_offset = iavf_rx_offset(rx_ring);  in iavf_alloc_mapped_page()
    898   bi->page_offset,  in iavf_alloc_rx_buffers()
    905   rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);  in iavf_alloc_rx_buffers()
    1140  new_buff->page_offset = old_buff->page_offset;  in iavf_reuse_rx_page()
    1200  if (rx_buffer->page_offset > IAVF_LAST_OFFSET)  in iavf_can_reuse_rx_page()
    1243  rx_buffer->page_offset, size, truesize);  in iavf_add_rx_frag()
    1247  rx_buffer->page_offset ^= truesize;  in iavf_add_rx_frag()
    1249  rx_buffer->page_offset += truesize;  in iavf_add_rx_frag()
    [all …]
|
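iavf (like ice and mlx4 further down) recycles half-page RX buffers: with 4 KiB pages the offset flips between the two halves via `page_offset ^= truesize`, while on larger-page systems it simply advances; `iavf_can_reuse_rx_page()` gives up once the offset passes `IAVF_LAST_OFFSET`. A model of the flip-versus-bump update, with a half-page `truesize` assumed:

```c
/* Model of the rx-buffer offset update in iavf_add_rx_frag(): with 4K
 * pages the buffer flips between the two halves of the page; with
 * bigger pages it walks forward through the page instead. */
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u

static void advance(unsigned int *page_offset, unsigned int truesize)
{
	if (MODEL_PAGE_SIZE < 8192)
		*page_offset ^= truesize;   /* flip halves: 0 <-> 2048 */
	else
		*page_offset += truesize;   /* walk through the big page */
}

int main(void)
{
	unsigned int page_offset = 0, truesize = MODEL_PAGE_SIZE / 2;

	for (int i = 0; i < 4; i++) {
		printf("frame %d placed at offset %u\n", i, page_offset);
		advance(&page_offset, truesize);
	}
	return 0;
}
```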
/Linux-v5.4/drivers/net/ethernet/sfc/falcon/ |
D | rx.c |
    59   return page_address(buf->page) + buf->page_offset;  in ef4_rx_buf_va()
    154  unsigned int page_offset;  in ef4_init_rx_buffers() local
    185  page_offset = sizeof(struct ef4_rx_page_state);  in ef4_init_rx_buffers()
    192  rx_buf->page_offset = page_offset + efx->rx_ip_align;  in ef4_init_rx_buffers()
    198  page_offset += efx->rx_page_buf_step;  in ef4_init_rx_buffers()
    199  } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);  in ef4_init_rx_buffers()
    447  rx_buf->page, rx_buf->page_offset,  in ef4_rx_packet_gro()
    492  rx_buf->page_offset += hdr_len;  in ef4_rx_mk_skb()
    497  rx_buf->page, rx_buf->page_offset,  in ef4_rx_mk_skb()
    579  rx_buf->page_offset += efx->rx_prefix_size;  in ef4_rx_packet()
|
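`ef4_init_rx_buffers()` (duplicated as `efx_init_rx_buffers()` in the sfc entry below) carves one page into several RX buffers: skip a per-page state header, place each buffer's data `rx_ip_align` bytes into its slot, and keep going while another full `rx_page_buf_step` still fits. A model with assumed sizes standing in for the driver's fields:

```c
/* Model of the carving loop in ef4_init_rx_buffers(): one page holds a
 * small state header, then as many rx buffers of rx_page_buf_step
 * bytes as fit. */
#include <stdio.h>

#define PAGE_SIZE 4096u

struct rx_page_state { unsigned int refcnt; };   /* stand-in header */

int main(void)
{
	unsigned int rx_page_buf_step = 1920;  /* assumed buffer stride */
	unsigned int rx_ip_align = 2;          /* assumed NET_IP_ALIGN */
	unsigned int page_offset = sizeof(struct rx_page_state);
	int n = 0;

	do {
		printf("rx_buf %d: page_offset %u\n",
		       n++, page_offset + rx_ip_align);
		page_offset += rx_page_buf_step;
	} while (page_offset + rx_page_buf_step <= PAGE_SIZE);

	return 0;
}
```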
/Linux-v5.4/drivers/infiniband/hw/mlx5/ |
D | srq_cmd.c |
    17  u32 page_offset = in->page_offset;  in get_pas_size() local
    21  u32 rq_sz_po = rq_sz + (page_offset * po_quanta);  in get_pas_size()
    34  MLX5_SET(wq, wq, page_offset, in->page_offset);  in set_wq()
    47  MLX5_SET(srqc, srqc, page_offset, in->page_offset);  in set_srqc()
    62  in->page_offset = MLX5_GET(wq, wq, page_offset);  in get_wq()
    75  in->page_offset = MLX5_GET(srqc, srqc, page_offset);  in get_srqc()
|
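In mlx5, `page_offset` is a device-visible field in the WQ/SRQ context: the queue may begin some quanta into its first page, so `get_pas_size()` grows the physical-address list to cover `rq_sz` plus `page_offset * po_quanta`, rounded up to whole pages. A model of that arithmetic; the quantum value is an assumption, only the `rq_sz_po` line is taken from the snippet above:

```c
/* Model of get_pas_size(): the PAS (physical address list) needs one
 * u64 per page covering the queue bytes plus the lead-in implied by
 * page_offset, rounded up to whole pages. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t page_size   = 4096;
	uint32_t po_quanta   = page_size / 64;   /* assumed quantum */
	uint32_t rq_sz       = 16384;            /* queue bytes */
	uint32_t page_offset = 3;                /* quanta into page 0 */

	uint32_t rq_sz_po   = rq_sz + page_offset * po_quanta;
	uint32_t rq_num_pas = (rq_sz_po + page_size - 1) / page_size;

	printf("PAS entries: %u (%zu bytes)\n",
	       rq_num_pas, rq_num_pas * sizeof(uint64_t));
	return 0;
}
```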
/Linux-v5.4/tools/testing/selftests/powerpc/primitives/ |
D | load_unaligned_zeropad.c |
    102  static int do_one_test(char *p, int page_offset)  in do_one_test() argument
    114  …printf("offset %u load_unaligned_zeropad returned 0x%lx, should be 0x%lx\n", page_offset, got, sho…  in do_one_test()
|
/Linux-v5.4/drivers/gpu/drm/panfrost/ |
D | panfrost_mmu.c |
    441  pgoff_t page_offset;  in panfrost_mmu_map_fault_addr() local
    459  page_offset = addr >> PAGE_SHIFT;  in panfrost_mmu_map_fault_addr()
    460  page_offset -= bo->node.start;  in panfrost_mmu_map_fault_addr()
    490  for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {  in panfrost_mmu_map_fault_addr()
    501  sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];  in panfrost_mmu_map_fault_addr()
    502  ret = sg_alloc_table_from_pages(sgt, pages + page_offset,  in panfrost_mmu_map_fault_addr()
|
/Linux-v5.4/drivers/net/ethernet/sfc/ |
D | rx.c |
    59   return page_address(buf->page) + buf->page_offset;  in efx_rx_buf_va()
    154  unsigned int page_offset;  in efx_init_rx_buffers() local
    185  page_offset = sizeof(struct efx_rx_page_state);  in efx_init_rx_buffers()
    192  rx_buf->page_offset = page_offset + efx->rx_ip_align;  in efx_init_rx_buffers()
    198  page_offset += efx->rx_page_buf_step;  in efx_init_rx_buffers()
    199  } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);  in efx_init_rx_buffers()
    436  rx_buf->page, rx_buf->page_offset,  in efx_rx_packet_gro()
    481  rx_buf->page_offset += hdr_len;  in efx_rx_mk_skb()
    486  rx_buf->page, rx_buf->page_offset,  in efx_rx_mk_skb()
    568  rx_buf->page_offset += efx->rx_prefix_size;  in efx_rx_packet()
|
/Linux-v5.4/drivers/gpu/drm/udl/ |
D | udl_gem.c |
    108  unsigned int page_offset;  in udl_gem_fault() local
    110  page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;  in udl_gem_fault()
    115  page = obj->pages[page_offset];  in udl_gem_fault()
|
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx4/ |
D | en_rx.c |
    70   frag->page_offset = priv->rx_headroom;  in mlx4_alloc_page()
    89   frags->page_offset);  in mlx4_en_alloc_frags()
    148  frags->page_offset = XDP_PACKET_HEADROOM;  in mlx4_en_prepare_rx_desc()
    488  dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,  in mlx4_en_complete_rx_desc()
    491  __skb_fill_page_desc(skb, nr, page, frags->page_offset,  in mlx4_en_complete_rx_desc()
    496  frags->page_offset ^= PAGE_SIZE / 2;  in mlx4_en_complete_rx_desc()
    507  frags->page_offset += sz_align;  in mlx4_en_complete_rx_desc()
    508  release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;  in mlx4_en_complete_rx_desc()
    706  va = page_address(frags[0].page) + frags[0].page_offset;  in mlx4_en_process_rx_cq()
    735  dma = frags[0].dma + frags[0].page_offset;  in mlx4_en_process_rx_cq()
    [all …]
|
/Linux-v5.4/drivers/net/ethernet/intel/ice/ |
D | ice_txrx.c |
    291  rx_buf->page_offset,  in ice_clean_rx_ring()
    300  rx_buf->page_offset = 0;  in ice_clean_rx_ring()
    447  bi->page_offset = 0;  in ice_alloc_mapped_page()
    488  bi->page_offset,  in ice_alloc_rx_bufs()
    495  rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);  in ice_alloc_rx_bufs()
    542  rx_buf->page_offset ^= size;  in ice_rx_buf_adjust_pg_offset()
    545  rx_buf->page_offset += size;  in ice_rx_buf_adjust_pg_offset()
    575  if (rx_buf->page_offset > last_offset)  in ice_can_reuse_rx_page()
    614  rx_buf->page_offset, size, truesize);  in ice_add_rx_frag()
    645  new_buf->page_offset = old_buf->page_offset;  in ice_reuse_rx_page()
    [all …]
|