/Linux-v5.4/include/linux/qed/qed_chain.h
    128  u32 page_cnt;  member
    242  return p_chain->page_cnt;  in qed_chain_get_page_cnt()
    282  if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)  in qed_chain_advance_page()
    286  if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)  in qed_chain_advance_page()
    475  u32 reset_val = p_chain->page_cnt - 1;  in qed_chain_reset()
    513  u32 page_cnt,  in qed_chain_init_params() argument
    534  p_chain->page_cnt = page_cnt;  in qed_chain_init_params()
    535  p_chain->capacity = p_chain->usable_per_page * page_cnt;  in qed_chain_init_params()
    536  p_chain->size = p_chain->elem_per_page * page_cnt;  in qed_chain_init_params()
    646  last_page_idx = p_chain->page_cnt - 1;  in qed_chain_get_last_elem()
    [all …]
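The qed_chain hits show how page_cnt drives the chain geometry: capacity counts usable elements, size counts raw slots, and the reset/last-element index is page_cnt - 1. A minimal user-space sketch of that arithmetic; the element and page sizes below are picked purely for illustration, whereas the real driver derives elem_per_page and usable_per_page from its chain parameters at init time:

    #include <stdio.h>

    /* Illustrative figures only; not the driver's actual geometry. */
    #define PAGE_SIZE_BYTES  4096u
    #define ELEM_SIZE_BYTES  64u

    int main(void)
    {
        unsigned int page_cnt = 8;                        /* pages backing the chain */
        unsigned int elem_per_page = PAGE_SIZE_BYTES / ELEM_SIZE_BYTES;
        unsigned int usable_per_page = elem_per_page - 1; /* e.g. one slot per page kept aside */

        /* Mirrors the assignments in qed_chain_init_params() above. */
        unsigned int capacity = usable_per_page * page_cnt;
        unsigned int size = elem_per_page * page_cnt;
        unsigned int reset_val = page_cnt - 1;            /* as in qed_chain_reset() */

        printf("capacity=%u size=%u reset_val=%u\n", capacity, size, reset_val);
        return 0;
    }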
/Linux-v5.4/net/rds/ib_fmr.c
    108  int page_cnt, sg_dma_len;  in rds_ib_map_fmr() local
    119  page_cnt = 0;  in rds_ib_map_fmr()
    131  ++page_cnt;  in rds_ib_map_fmr()
    140  ++page_cnt;  in rds_ib_map_fmr()
    147  page_cnt += len >> PAGE_SHIFT;  in rds_ib_map_fmr()
    148  if (page_cnt > ibmr->pool->fmr_attr.max_pages) {  in rds_ib_map_fmr()
    153  dma_pages = kmalloc_array_node(sizeof(u64), page_cnt, GFP_ATOMIC,  in rds_ib_map_fmr()
    160  page_cnt = 0;  in rds_ib_map_fmr()
    166  dma_pages[page_cnt++] =  in rds_ib_map_fmr()
    170  ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);  in rds_ib_map_fmr()
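rds_ib_map_fmr() first counts how many pages the mapped scatterlist spans, rejects the mapping if that exceeds the pool's max_pages, and only then allocates and fills the dma_pages array. A small sketch of the counting step, assuming every segment is page-aligned and a whole number of pages (the real code validates each segment before counting); MAX_FMR_PAGES is a made-up stand-in for fmr_attr.max_pages:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define MAX_FMR_PAGES 256          /* stand-in for ibmr->pool->fmr_attr.max_pages */

    /* Accumulate page_cnt += len >> PAGE_SHIFT over all segments, then
     * enforce the pool limit, as the hits above do. */
    int count_pages(const size_t *seg_len, int nsegs)
    {
        int page_cnt = 0;

        for (int i = 0; i < nsegs; i++)
            page_cnt += seg_len[i] >> PAGE_SHIFT;

        if (page_cnt > MAX_FMR_PAGES)
            return -1;                 /* too many pages for one mapping */
        return page_cnt;
    }

    int main(void)
    {
        size_t segs[] = { 4096, 8192, 16384 };
        printf("page_cnt=%d\n", count_pages(segs, 3));
        return 0;
    }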
/Linux-v5.4/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
    133  union cvmx_ipd_portx_bp_page_cnt page_cnt;  in cvmx_helper_setup_red() local
    141  page_cnt.u64 = 0;  in cvmx_helper_setup_red()
    142  page_cnt.s.bp_enb = 0;  in cvmx_helper_setup_red()
    143  page_cnt.s.page_cnt = 100;  in cvmx_helper_setup_red()
    148  page_cnt.u64);  in cvmx_helper_setup_red()
/Linux-v5.4/drivers/hv/ring_buffer.c
    193  struct page *pages, u32 page_cnt)  in hv_ringbuffer_init() argument
    204  pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),  in hv_ringbuffer_init()
    210  for (i = 0; i < 2 * (page_cnt - 1); i++)  in hv_ringbuffer_init()
    211  pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];  in hv_ringbuffer_init()
    214  vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);  in hv_ringbuffer_init()
    228  ring_info->ring_size = page_cnt << PAGE_SHIFT;  in hv_ringbuffer_init()
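hv_ringbuffer_init() vmaps 2 * page_cnt - 1 page pointers: slot 0 appears to hold the ring-header page (not shown in the hits), and each of the page_cnt - 1 data pages is mapped twice back to back, so an access that runs off the end of the ring continues into the second copy instead of wrapping by hand. A user-space sketch that only prints the slot-to-page pattern produced by the loop above, with an assumed page_cnt:

    #include <stdio.h>

    int main(void)
    {
        unsigned int page_cnt = 5;              /* assumed: 1 header page + 4 data pages */
        unsigned int total = page_cnt * 2 - 1;  /* size of the wraparound mapping */

        printf("slot 0 -> page 0 (header)\n");
        for (unsigned int i = 0; i < 2 * (page_cnt - 1); i++)
            printf("slot %u -> page %u\n", i + 1, i % (page_cnt - 1) + 1);

        printf("total mapped slots: %u\n", total);
        return 0;
    }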
/Linux-v5.4/drivers/hv/hv_balloon.c
    139   __u64 page_cnt:24;  member
    1000  pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;  in hot_add_req()
    1003  rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;  in hot_add_req()
    1193  int num_pages = range_array->finfo.page_cnt;  in free_balloon_pages()
    1252  bl_resp->range_array[i].finfo.page_cnt = alloc_unit;  in alloc_balloon_pages()
/Linux-v5.4/drivers/base/firmware_loader/fallback.c
    320  int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);  in firmware_rw() local
    325  memcpy(buffer, page_data + page_ofs, page_cnt);  in firmware_rw()
    327  memcpy(page_data + page_ofs, buffer, page_cnt);  in firmware_rw()
    330  buffer += page_cnt;  in firmware_rw()
    331  offset += page_cnt;  in firmware_rw()
    332  count -= page_cnt;  in firmware_rw()
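In firmware_rw(), page_cnt is the size of one copy step: the remainder of the current backing page, capped by how many bytes are still requested. A hedged user-space rendering of that loop, where store is a hypothetical array of page-sized buffers standing in for the firmware's paged data:

    #include <string.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096

    /* Copy "count" bytes starting at byte "offset" out of an array of
     * page-sized buffers, one partial page at a time. */
    void paged_read(char *buffer, char *const *store, size_t offset, size_t count)
    {
        while (count) {
            size_t page_idx = offset / PAGE_SIZE;
            size_t page_ofs = offset % PAGE_SIZE;
            size_t page_cnt = PAGE_SIZE - page_ofs;   /* bytes left in this page */

            if (page_cnt > count)
                page_cnt = count;                     /* min(PAGE_SIZE - page_ofs, count) */

            memcpy(buffer, store[page_idx] + page_ofs, page_cnt);

            buffer += page_cnt;
            offset += page_cnt;
            count  -= page_cnt;
        }
    }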
/Linux-v5.4/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
    324   uint64_t page_cnt:17;  member
    326   uint64_t page_cnt:17;
    999   uint64_t page_cnt:17;  member
    1001  uint64_t page_cnt:17;
    1014  uint64_t page_cnt:17;  member
    1016  uint64_t page_cnt:17;
    1029  uint64_t page_cnt:17;  member
    1031  uint64_t page_cnt:17;
    1387  uint64_t page_cnt:25;  member
    1389  uint64_t page_cnt:25;
/Linux-v5.4/drivers/staging/rts5208/xd.c
    1506  u8 reg_val, page_cnt;  in xd_read_multiple_pages() local
    1512  page_cnt = end_page - start_page;  in xd_read_multiple_pages()
    1538  rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);  in xd_read_multiple_pages()
    1543  page_cnt * 512, DMA_512);  in xd_read_multiple_pages()
    1553  retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,  in xd_read_multiple_pages()
    1709  u8 page_cnt, reg_val;  in xd_write_multiple_pages() local
    1717  page_cnt = end_page - start_page;  in xd_write_multiple_pages()
    1739  rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);  in xd_write_multiple_pages()
    1743  page_cnt * 512, DMA_512);  in xd_write_multiple_pages()
    1752  retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,  in xd_write_multiple_pages()
    [all …]
/Linux-v5.4/drivers/staging/rts5208/ms.c
    3400  u8 start_page, end_page = 0, page_cnt;  local
    3526  page_cnt = end_page - start_page;
    3529  start_page, end_page, page_cnt);
    3566  total_sec_cnt -= page_cnt;
    3568  ptr += page_cnt * 512;
/Linux-v5.4/drivers/infiniband/hw/i40iw/i40iw_verbs.h
    96  u32 page_cnt;  member
/Linux-v5.4/drivers/infiniband/hw/i40iw/i40iw_verbs.c
    1404  status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);  in i40iw_setup_pbles()
    1581  iwmr->page_cnt = max_num_sg;  in i40iw_alloc_mr()
    1583  status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);  in i40iw_alloc_mr()
    1617  if (unlikely(iwmr->npages == iwmr->page_cnt))  in i40iw_set_page()
    1802  iwmr->page_cnt = (u32)pbl_depth;  in i40iw_reg_user_mr()
    1827  use_pbles = (iwmr->page_cnt != 1);  in i40iw_reg_user_mr()
    1918  iwmr->page_cnt = 1;  in i40iw_reg_phys_mr()
/Linux-v5.4/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
    335  u8 page_cnt, i;  in qed_sp_pf_start() local
    394  page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);  in qed_sp_pf_start()
    395  p_ramrod->event_ring_num_pages = page_cnt;  in qed_sp_pf_start()
/Linux-v5.4/drivers/net/ethernet/qlogic/qed/qed_dev.c
    4622  for (i = 0; i < p_chain->page_cnt; i++) {  in qed_chain_free_next_ptr()
    4652  u32 page_cnt = p_chain->page_cnt, i, pbl_size;  in qed_chain_free_pbl() local
    4661  for (i = 0; i < page_cnt; i++) {  in qed_chain_free_pbl()
    4673  pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;  in qed_chain_free_pbl()
    4703  size_t elem_size, u32 page_cnt)  in qed_chain_alloc_sanity_check() argument
    4705  u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;  in qed_chain_alloc_sanity_check()
    4732  for (i = 0; i < p_chain->page_cnt; i++) {  in qed_chain_alloc_next_ptr()
    4781  u32 page_cnt = p_chain->page_cnt, size, i;  in qed_chain_alloc_pbl() local
    4787  size = page_cnt * sizeof(*pp_virt_addr_tbl);  in qed_chain_alloc_pbl()
    4798  size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;  in qed_chain_alloc_pbl()
    [all …]
/Linux-v5.4/drivers/infiniband/hw/efa/efa_verbs.c
    1065  static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)  in efa_vmalloc_buf_to_sg() argument
    1071  sglist = kcalloc(page_cnt, sizeof(*sglist), GFP_KERNEL);  in efa_vmalloc_buf_to_sg()
    1074  sg_init_table(sglist, page_cnt);  in efa_vmalloc_buf_to_sg()
    1075  for (i = 0; i < page_cnt; i++) {  in efa_vmalloc_buf_to_sg()
    1096  int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;  in pbl_chunk_list_create() local
    1107  chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);  in pbl_chunk_list_create()
    1118  page_cnt);  in pbl_chunk_list_create()
    1129  ((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +  in pbl_chunk_list_create()
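pbl_chunk_list_create() splits page_cnt page pointers into fixed-size chunks: DIV_ROUND_UP gives the chunk count and the remainder sizes the last chunk. A sketch of that arithmetic only; PTRS_PER_CHUNK is a made-up figure (not the driver's EFA_PTRS_PER_CHUNK), and the even-division guard below is part of this sketch rather than quoted from the driver:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define PTRS_PER_CHUNK 500   /* hypothetical chunk geometry */

    int main(void)
    {
        unsigned int page_cnt = 1234;   /* pbl_buf_size_in_pages in the listing */

        unsigned int chunks = DIV_ROUND_UP(page_cnt, PTRS_PER_CHUNK);
        unsigned int last   = page_cnt % PTRS_PER_CHUNK;
        if (!last)
            last = PTRS_PER_CHUNK;      /* a full final chunk when it divides evenly */

        printf("%u chunks, %u pointers in the last chunk\n", chunks, last);
        return 0;
    }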
/Linux-v5.4/tools/lib/bpf/libbpf.h
    379  perf_buffer__new(int map_fd, size_t page_cnt,
    412  perf_buffer__new_raw(int map_fd, size_t page_cnt,
/Linux-v5.4/tools/lib/bpf/libbpf.c
    5417  static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
    5420  struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,  in perf_buffer__new() argument
    5437  return __perf_buffer__new(map_fd, page_cnt, &p);  in perf_buffer__new()
    5441  perf_buffer__new_raw(int map_fd, size_t page_cnt,  in perf_buffer__new_raw() argument
    5453  return __perf_buffer__new(map_fd, page_cnt, &p);  in perf_buffer__new_raw()
    5456  static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,  in __perf_buffer__new() argument
    5465  if (page_cnt & (page_cnt - 1)) {  in __perf_buffer__new()
    5467  page_cnt);  in __perf_buffer__new()
    5496  pb->mmap_size = pb->page_size * page_cnt;  in __perf_buffer__new()
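In libbpf, page_cnt is the number of ring pages mmapped per CPU, and __perf_buffer__new() rejects any value that is not a power of two (the page_cnt & (page_cnt - 1) check above). A hedged usage sketch assuming the v5.4-era libbpf API, where callbacks are passed through struct perf_buffer_opts; map_fd is assumed to refer to a BPF_MAP_TYPE_PERF_EVENT_ARRAY map obtained elsewhere:

    #include <stdio.h>
    #include <linux/types.h>
    #include <bpf/libbpf.h>

    static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
    {
        printf("cpu %d: sample of %u bytes\n", cpu, size);
    }

    static void handle_lost(void *ctx, int cpu, __u64 cnt)
    {
        fprintf(stderr, "cpu %d: lost %llu samples\n", cpu, (unsigned long long)cnt);
    }

    int attach_perf_buffer(int map_fd)
    {
        struct perf_buffer_opts pb_opts = {
            .sample_cb = handle_sample,
            .lost_cb   = handle_lost,
        };
        struct perf_buffer *pb;
        long err;

        /* 8 pages per CPU; page_cnt must be a power of two. */
        pb = perf_buffer__new(map_fd, 8, &pb_opts);
        err = libbpf_get_error(pb);
        if (err) {
            fprintf(stderr, "perf_buffer__new: %ld\n", err);
            return -1;
        }

        while (perf_buffer__poll(pb, 100 /* ms */) >= 0)
            ;   /* callbacks fire from inside poll */

        perf_buffer__free(pb);
        return 0;
    }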
/Linux-v5.4/drivers/misc/mic/scif/scif_nodeqp.c
    360  scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt)  in scif_p2p_setsg() argument
    366  sg = kcalloc(page_cnt, sizeof(struct scatterlist), GFP_KERNEL);  in scif_p2p_setsg()
    369  sg_init_table(sg, page_cnt);  in scif_p2p_setsg()
    370  for (i = 0; i < page_cnt; i++) {  in scif_p2p_setsg()
/Linux-v5.4/fs/ubifs/file.c
    724  int err, page_idx, page_cnt, ret = 0, n = 0;  in ubifs_do_bulk_read() local
    739  page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;  in ubifs_do_bulk_read()
    740  if (!page_cnt) {  in ubifs_do_bulk_read()
    783  for (page_idx = 1; page_idx < page_cnt; page_idx++) {  in ubifs_do_bulk_read()
/Linux-v5.4/drivers/infiniband/hw/qedr/main.c
    790  u32 page_cnt;  in qedr_init_hw() local
    804  page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);  in qedr_init_hw()
    805  cur_pbl->num_pbl_pages = page_cnt;  in qedr_init_hw()
/Linux-v5.4/drivers/infiniband/hw/qedr/verbs.c
    750   int chain_entries, int page_cnt,  in qedr_init_cq_params() argument
    761   params->pbl_num_pages = page_cnt;  in qedr_init_cq_params()
    824   int page_cnt;  in qedr_create_cq() local
    865   page_cnt = cq->q.pbl_info.num_pbes;  in qedr_create_cq()
    881   page_cnt = qed_chain_get_page_cnt(&cq->pbl);  in qedr_create_cq()
    886   qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,  in qedr_create_cq()
    1364  u32 page_cnt, page_size;  in qedr_create_srq() local
    1394  page_cnt = srq->usrq.pbl_info.num_pbes;  in qedr_create_srq()
    1406  page_cnt = qed_chain_get_page_cnt(pbl);  in qedr_create_srq()
    1415  in_params.num_pages = page_cnt;  in qedr_create_srq()
/Linux-v5.4/drivers/net/ethernet/qlogic/qede/qede_main.c
    1984  u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);  in qede_start_txq() local
    2005  page_cnt, &ret_params);  in qede_start_txq()
    2074  u32 page_cnt;  in qede_start_queues() local
    2090  page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);  in qede_start_queues()
    2096  page_cnt, &ret_params);  in qede_start_queues()
/Linux-v5.4/fs/btrfs/ioctl.c
    1237  u64 page_cnt;  in cluster_pages_for_defrag() local
    1251  page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);  in cluster_pages_for_defrag()
    1255  page_cnt << PAGE_SHIFT);  in cluster_pages_for_defrag()
    1262  for (i = 0; i < page_cnt; i++) {  in cluster_pages_for_defrag()
    1339  if (i_done != page_cnt) {  in cluster_pages_for_defrag()
    1345  (page_cnt - i_done) << PAGE_SHIFT, true);  in cluster_pages_for_defrag()
    1363  btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);  in cluster_pages_for_defrag()
    1373  page_cnt << PAGE_SHIFT, true);  in cluster_pages_for_defrag()
    1374  btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);  in cluster_pages_for_defrag()
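cluster_pages_for_defrag() clamps the defrag cluster so it never runs past the last page of the file, then converts the page count to bytes (page_cnt << PAGE_SHIFT) for the delalloc reservations and releases. A tiny sketch of that clamp with made-up page indices:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

    int main(void)
    {
        /* Assumed figures: an 8-page cluster near the end of a file whose
         * last page index is file_end. */
        uint64_t num_pages = 8, start_index = 1021, file_end = 1023;

        /* Clamp the cluster to the pages that actually exist. */
        uint64_t page_cnt = min_u64(num_pages, file_end - start_index + 1);

        /* Space reservations are made in bytes. */
        uint64_t reserve_bytes = page_cnt << PAGE_SHIFT;

        printf("page_cnt=%llu reserve=%llu bytes\n",
               (unsigned long long)page_cnt, (unsigned long long)reserve_bytes);
        return 0;
    }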
/Linux-v5.4/drivers/mmc/core/mmc_test.c
    341  unsigned long page_cnt = 0;  in mmc_test_alloc_mem() local
    378  if (page_cnt < min_page_cnt)  in mmc_test_alloc_mem()
    388  page_cnt += 1UL << order;  in mmc_test_alloc_mem()
    390  if (page_cnt < min_page_cnt)  in mmc_test_alloc_mem()
/Linux-v5.4/arch/sparc/kernel/traps_64.c
    2167  unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,  in sun4v_nonresum_error_user_handled() local
    2174  page_cnt);  in sun4v_nonresum_error_user_handled()
    2176  while (page_cnt-- > 0) {  in sun4v_nonresum_error_user_handled()
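Here the page count comes from rounding the reported error size up to whole pages, and the handler then walks that many pages. A minimal sketch of the same DIV_ROUND_UP arithmetic, with an assumed err_size:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long err_size = 5000;   /* assumed; a partial page still counts */
        unsigned long page_cnt = DIV_ROUND_UP(err_size, PAGE_SIZE);

        printf("err_size=%lu -> page_cnt=%lu\n", err_size, page_cnt);  /* prints 2 */

        while (page_cnt-- > 0) {
            /* the real handler processes each affected page here */
        }
        return 0;
    }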