
Searched refs:num_desc (Results 1 – 25 of 42) sorted by relevance


/Linux-v5.4/drivers/infiniband/hw/hfi1/
iowait.h
342 u16 num_desc = 0; in iowait_get_desc() local
348 num_desc = tx->num_desc; in iowait_get_desc()
352 return num_desc; in iowait_get_desc()
357 u32 num_desc = 0; in iowait_get_all_desc() local
359 num_desc = iowait_get_desc(&w->wait[IOWAIT_IB_SE]); in iowait_get_all_desc()
360 num_desc += iowait_get_desc(&w->wait[IOWAIT_TID_SE]); in iowait_get_all_desc()
361 return num_desc; in iowait_get_all_desc()
sdma.h
556 tx->num_desc = 0; in sdma_txinit_ahg()
641 struct sdma_desc *desc = &tx->descp[tx->num_desc]; in make_tx_sdma_desc()
643 if (!tx->num_desc) { in make_tx_sdma_desc()
667 if (tx->num_desc) in sdma_txclean()
675 tx->descp[tx->num_desc].qw[0] |= in _sdma_close_tx()
677 tx->descp[tx->num_desc].qw[1] |= in _sdma_close_tx()
680 tx->descp[tx->num_desc].qw[1] |= in _sdma_close_tx()
710 tx->num_desc++; in _sdma_txadd_daddr()
740 if ((unlikely(tx->num_desc == tx->desc_limit))) { in sdma_txadd_page()
787 if ((unlikely(tx->num_desc == tx->desc_limit))) { in sdma_txadd_daddr()
[all …]
sdma_txreq.h
118 u16 num_desc; member
133 return tx->num_desc; in sdma_txreq_built()
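In the sdma_txreq.h hits, num_desc doubles as the "already built" indicator: sdma_txreq_built() simply returns it. A minimal userspace sketch of that idiom; the struct layout and names are illustrative, not the hfi1 definitions.

/* Sketch only: a trimmed-down tx request whose descriptor count
 * doubles as a "built" flag, mirroring sdma_txreq_built() above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct txreq_sketch {
        uint16_t num_desc;      /* descriptors added so far */
        uint16_t desc_limit;    /* capacity of the descriptor array */
};

static bool txreq_built(const struct txreq_sketch *tx)
{
        /* non-zero count <=> at least one descriptor has been built */
        return tx->num_desc != 0;
}

int main(void)
{
        struct txreq_sketch tx = { .num_desc = 0, .desc_limit = 8 };

        printf("built before add: %d\n", txreq_built(&tx)); /* 0 */
        tx.num_desc++;                                       /* add one descriptor */
        printf("built after add:  %d\n", txreq_built(&tx));  /* 1 */
        return 0;
}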
sdma.c
1681 if (tx->num_desc) { in __sdma_txclean()
1689 for (i = 1 + skip; i < tx->num_desc; i++) in __sdma_txclean()
1691 tx->num_desc = 0; in __sdma_txclean()
1788 u32 num_desc; in sdma_desc_avail() local
1795 num_desc = iowait_get_all_desc(wait); in sdma_desc_avail()
1796 if (num_desc > avail) in sdma_desc_avail()
1798 avail -= num_desc; in sdma_desc_avail()
2323 for (i = 1; i < tx->num_desc; i++, descp++) { in submit_tx()
2347 sde->desc_avail -= tx->num_desc; in submit_tx()
2363 if (tx->num_desc <= sde->desc_avail) in sdma_check_progress()
[all …]
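The sdma.c hits show the descriptor budget being enforced: a waiter is only restarted when its total num_desc fits into the engine's free descriptors, and submit_tx() charges that budget. A compact sketch of the same check, with hypothetical names (engine_sketch, try_submit).

/* Sketch of the availability bookkeeping visible in the sdma.c hits above:
 * only submit a request whose descriptor count fits in the ring budget,
 * then charge that budget. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct engine_sketch {
        uint32_t desc_avail;    /* free descriptors in the hardware ring */
};

static bool try_submit(struct engine_sketch *sde, uint16_t num_desc)
{
        if (num_desc > sde->desc_avail)
                return false;           /* caller must queue and wait */
        sde->desc_avail -= num_desc;    /* charge the ring budget */
        return true;
}

int main(void)
{
        struct engine_sketch sde = { .desc_avail = 4 };

        printf("%d\n", try_submit(&sde, 3));  /* 1: fits, avail now 1 */
        printf("%d\n", try_submit(&sde, 2));  /* 0: does not fit */
        return 0;
}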
verbs_txreq.h
95 tx->txreq.num_desc = 0; in get_txreq()
/Linux-v5.4/drivers/gpu/drm/via/
via_dmablit.c
69 int num_desc = vsg->num_desc; in via_unmap_blit_from_device() local
70 unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page; in via_unmap_blit_from_device()
71 unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page; in via_unmap_blit_from_device()
76 while (num_desc--) { in via_unmap_blit_from_device()
112 int num_desc = 0; in via_map_blit_for_device() local
151 num_desc++; in via_map_blit_for_device()
164 vsg->num_desc = num_desc; in via_map_blit_for_device()
269 vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) / in via_alloc_desc_pages()
282 vsg->num_desc); in via_alloc_desc_pages()
via_dmablit.h
45 int num_desc; member
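The via_dmablit.c hits above break a flat descriptor index into a page number and a slot within the page, and size the page array with a rounded-up division. A small sketch of that arithmetic; the helper names are mine.

/* Sketch of the paging arithmetic in via_dmablit.c: descriptors live in
 * fixed-size pages, so a flat index maps to a (page, slot) pair and the
 * page count is a ceiling division. */
#include <stdio.h>

static unsigned desc_page(unsigned idx, unsigned per_page) { return idx / per_page; }
static unsigned desc_slot(unsigned idx, unsigned per_page) { return idx % per_page; }

static unsigned pages_needed(unsigned num_desc, unsigned per_page)
{
        return (num_desc + per_page - 1) / per_page;   /* ceil(num_desc / per_page) */
}

int main(void)
{
        unsigned per_page = 128, num_desc = 300;

        /* descriptor 300 lives in page 2, slot 44; 300 descriptors need 3 pages */
        printf("page=%u slot=%u pages=%u\n",
               desc_page(num_desc, per_page),
               desc_slot(num_desc, per_page),
               pages_needed(num_desc, per_page));
        return 0;
}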
/Linux-v5.4/drivers/soc/ti/
knav_qmss_queue.c
700 for (i = 0; i < pool->num_desc; i++) { in kdesc_fill_pool()
734 WARN_ON(i != pool->num_desc); in kdesc_empty_pool()
765 int num_desc, int region_id) in knav_pool_create() argument
814 if (num_desc > (region->num_desc - region->used_desc)) { in knav_pool_create()
829 if ((pi->region_offset - last_offset) >= num_desc) { in knav_pool_create()
833 last_offset = pi->region_offset + pi->num_desc; in knav_pool_create()
839 pool->num_desc = num_desc; in knav_pool_create()
841 region->used_desc += num_desc; in knav_pool_create()
881 pool->region->used_desc -= pool->num_desc; in knav_pool_destroy()
999 if (!region->num_desc) { in knav_queue_setup_region()
[all …]
knav_qmss.h
199 unsigned num_desc; member
226 int num_desc; member
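knav_pool_create() above refuses a request larger than the region's remaining descriptors and then looks for the first gap between existing pools that can hold num_desc entries. A userspace sketch of that first-fit placement; the data layout and function names are assumptions, not the TI driver's.

/* Sketch of first-fit placement: walk pools already carved out of a region
 * (sorted by offset) and place the new pool in the first gap big enough. */
#include <stdio.h>

struct pool_sketch   { int region_offset; int num_desc; };
struct region_sketch { int num_desc; int used_desc; };

/* returns the chosen offset, or -1 if the request cannot be satisfied */
static int place_pool(const struct region_sketch *region,
                      const struct pool_sketch *pools, int n_pools,
                      int num_desc)
{
        int last_offset = 0, i;

        if (num_desc > region->num_desc - region->used_desc)
                return -1;                      /* not enough free descriptors */

        for (i = 0; i < n_pools; i++) {
                if (pools[i].region_offset - last_offset >= num_desc)
                        return last_offset;     /* gap before this pool fits */
                last_offset = pools[i].region_offset + pools[i].num_desc;
        }
        /* otherwise try the tail of the region */
        return (region->num_desc - last_offset >= num_desc) ? last_offset : -1;
}

int main(void)
{
        struct region_sketch region = { .num_desc = 1024, .used_desc = 320 };
        struct pool_sketch pools[] = { { 0, 256 }, { 512, 64 } };

        /* 256..511 is a 256-descriptor gap, so a 128-descriptor pool lands at 256 */
        printf("offset=%d\n", place_pool(&region, pools, 2, 128));
        return 0;
}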
/Linux-v5.4/drivers/net/ethernet/qlogic/netxen/
netxen_nic_init.c
101 for (i = 0; i < rds_ring->num_desc; ++i) { in netxen_release_rx_buffers()
124 for (i = 0; i < tx_ring->num_desc; i++) { in netxen_release_tx_buffers()
196 tx_ring->num_desc = adapter->num_txd; in netxen_alloc_sw_resources()
218 rds_ring->num_desc = adapter->num_rxd; in netxen_alloc_sw_resources()
237 rds_ring->num_desc = adapter->num_jumbo_rxd; in netxen_alloc_sw_resources()
253 rds_ring->num_desc = adapter->num_lro_rxd; in netxen_alloc_sw_resources()
270 for (i = 0; i < rds_ring->num_desc; i++) { in netxen_alloc_sw_resources()
284 sds_ring->num_desc = adapter->num_rxd; in netxen_alloc_sw_resources()
1438 index = get_next_index(index, sds_ring->num_desc); in netxen_handle_fw_message()
1532 if (unlikely(index >= rds_ring->num_desc)) in netxen_process_rcv()
[all …]
netxen_nic_ethtool.c
418 u32 num_desc; in netxen_validate_ringparam() local
419 num_desc = max(val, min); in netxen_validate_ringparam()
420 num_desc = min(num_desc, max); in netxen_validate_ringparam()
421 num_desc = roundup_pow_of_two(num_desc); in netxen_validate_ringparam()
423 if (val != num_desc) { in netxen_validate_ringparam()
425 netxen_nic_driver_name, r_name, num_desc, val); in netxen_validate_ringparam()
428 return num_desc; in netxen_validate_ringparam()
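netxen_validate_ringparam() clamps the requested count into [min, max] and rounds it up to a power of two, warning when the result differs from what was asked for. A sketch of the same pattern; roundup_pow2() is a local stand-in for the kernel's roundup_pow_of_two().

/* Sketch of the ring-size validation pattern above: clamp, then round up to
 * a power of two so producer/consumer indices can wrap with a cheap mask. */
#include <stdint.h>
#include <stdio.h>

static uint32_t roundup_pow2(uint32_t v)
{
        uint32_t p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

static uint32_t validate_ringparam(uint32_t val, uint32_t min, uint32_t max,
                                   const char *name)
{
        uint32_t num_desc = val;

        if (num_desc < min)
                num_desc = min;
        if (num_desc > max)
                num_desc = max;
        num_desc = roundup_pow2(num_desc);

        if (val != num_desc)
                fprintf(stderr, "%s ring: using %u instead of %u\n",
                        name, num_desc, val);
        return num_desc;
}

int main(void)
{
        printf("%u\n", validate_ringparam(1000, 64, 4096, "tx"));  /* 1024 */
        printf("%u\n", validate_ringparam(10000, 64, 4096, "tx")); /* 4096 */
        return 0;
}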
netxen_nic_ctx.c
328 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); in nx_fw_cmd_create_rx_ctx()
341 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); in nx_fw_cmd_create_rx_ctx()
468 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); in nx_fw_cmd_create_tx_ctx()
694 hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc); in netxen_init_old_ctx()
703 cpu_to_le32(rds_ring->num_desc); in netxen_init_old_ctx()
711 hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc); in netxen_init_old_ctx()
714 hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc); in netxen_init_old_ctx()
netxen_nic.h
60 (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
62 (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc)
64 (sizeof(struct status_desc) * (sds_ring)->num_desc)
66 (sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc)
68 (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
611 u32 num_desc; member
625 u32 num_desc; member
646 u32 num_desc; member
1862 tx_ring->sw_consumer, tx_ring->num_desc); in netxen_tx_avail()
/Linux-v5.4/drivers/net/ethernet/toshiba/
spider_net.c
286 dma_free_coherent(&card->pdev->dev, chain->num_desc, in spider_net_free_chain()
310 alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr); in spider_net_init_chain()
317 memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr)); in spider_net_init_chain()
323 for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) { in spider_net_init_chain()
720 if (cnt < card->tx_chain.num_desc/4) in spider_net_set_low_watermark()
994 chain->num_desc); in show_rx_chain()
1024 int from = (chain->num_desc + off - cnt) % chain->num_desc; in show_rx_chain()
1025 int to = (chain->num_desc + off - 1) % chain->num_desc; in show_rx_chain()
1093 for (i=0; i<chain->num_desc; i++) { in spider_net_resync_head_ptr()
1113 for (i=0; i<chain->num_desc; i++) { in spider_net_resync_tail_ptr()
[all …]
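The show_rx_chain() hits step backwards through the descriptor ring; adding num_desc before taking the modulo keeps the index from going negative. A tiny worked example with an arbitrary ring size.

/* Sketch of the wrap-safe ring arithmetic in show_rx_chain() above. */
#include <stdio.h>

int main(void)
{
        int num_desc = 256;     /* descriptors in the chain */
        int off = 3;            /* current position */
        int cnt = 10;           /* how far back to look */

        int from = (num_desc + off - cnt) % num_desc;  /* 249: wrapped around */
        int to   = (num_desc + off - 1) % num_desc;    /* 2: previous slot   */

        printf("from=%d to=%d\n", from, to);
        return 0;
}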
spider_net_ethtool.c
118 ering->tx_pending = card->tx_chain.num_desc; in spider_net_ethtool_get_ringparam()
120 ering->rx_pending = card->rx_chain.num_desc; in spider_net_ethtool_get_ringparam()
/Linux-v5.4/drivers/net/ethernet/qlogic/qlcnic/
qlcnic_io.c
297 tx_ring->producer = get_next_index(producer, tx_ring->num_desc); in qlcnic_82xx_change_filter()
413 producer = get_next_index(producer, tx_ring->num_desc); in qlcnic_tx_encap_pkt()
536 producer = get_next_index(producer, tx_ring->num_desc); in qlcnic_tx_pkt()
549 producer = get_next_index(producer, tx_ring->num_desc); in qlcnic_tx_pkt()
675 num_txd = tx_ring->num_desc; in qlcnic_xmit_frame()
876 producer = get_next_index(producer, rds_ring->num_desc); in qlcnic_post_rx_buffers_nodb()
880 writel((producer - 1) & (rds_ring->num_desc - 1), in qlcnic_post_rx_buffers_nodb()
921 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); in qlcnic_process_cmd_ring()
1096 index = get_next_index(index, sds_ring->num_desc); in qlcnic_handle_fw_message()
1212 if (unlikely(index >= rds_ring->num_desc)) in qlcnic_process_rcv()
[all …]
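The qlcnic_io.c hits advance the producer with get_next_index() and, because num_desc is forced to a power of two by the ethtool validation, use "(producer - 1) & (num_desc - 1)" as a cheap modulo when reporting the last filled slot. A userspace illustration, not the driver code.

/* Sketch of producer-index handling: wrap at num_desc, then mask to find
 * the last written slot even when the producer has just wrapped to 0. */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_next_index(uint32_t index, uint32_t ring_size)
{
        /* advance by one and wrap to 0 at the end of the ring */
        return (index + 1 == ring_size) ? 0 : index + 1;
}

int main(void)
{
        uint32_t num_desc = 8;          /* power of two */
        uint32_t producer = 0;

        for (int i = 0; i < 8; i++)     /* fill the whole ring once */
                producer = get_next_index(producer, num_desc);

        /* producer wrapped back to 0; the last written slot is 7 */
        printf("producer=%u last_written=%u\n",
               producer, (producer - 1) & (num_desc - 1));
        return 0;
}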
qlcnic_init.c
93 for (i = 0; i < rds_ring->num_desc; ++i) { in qlcnic_release_rx_buffers()
122 for (i = 0; i < rds_ring->num_desc; i++) { in qlcnic_reset_rx_buffers_list()
140 for (i = 0; i < tx_ring->num_desc; i++) { in qlcnic_release_tx_buffers()
206 rds_ring->num_desc = adapter->num_rxd; in qlcnic_alloc_sw_resources()
212 rds_ring->num_desc = adapter->num_jumbo_rxd; in qlcnic_alloc_sw_resources()
234 for (i = 0; i < rds_ring->num_desc; i++) { in qlcnic_alloc_sw_resources()
247 sds_ring->num_desc = adapter->num_rxd; in qlcnic_alloc_sw_resources()
qlcnic_ethtool.c
652 u32 num_desc; in qlcnic_validate_ringparam() local
653 num_desc = max(val, min); in qlcnic_validate_ringparam()
654 num_desc = min(num_desc, max); in qlcnic_validate_ringparam()
655 num_desc = roundup_pow_of_two(num_desc); in qlcnic_validate_ringparam()
657 if (val != num_desc) { in qlcnic_validate_ringparam()
659 qlcnic_driver_name, r_name, num_desc, val); in qlcnic_validate_ringparam()
662 return num_desc; in qlcnic_validate_ringparam()
qlcnic.h
66 (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
68 (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
70 (sizeof(struct status_desc) * (sds_ring)->num_desc)
72 (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
74 (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
580 u32 num_desc; member
592 u32 num_desc; member
625 u32 num_desc; member
1731 return tx_ring->sw_consumer + tx_ring->num_desc - in qlcnic_tx_avail()
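The truncated qlcnic_tx_avail() hit at line 1731 is part of the usual producer/consumer space calculation: free slots are sw_consumer - producer, plus num_desc when the producer has wrapped past the consumer. A sketch with an illustrative struct.

/* Sketch of free-slot accounting in a circular ring of num_desc entries. */
#include <stdint.h>
#include <stdio.h>

struct ring_sketch {
        uint32_t producer;
        uint32_t sw_consumer;
        uint32_t num_desc;
};

static uint32_t tx_avail(const struct ring_sketch *r)
{
        if (r->producer < r->sw_consumer)
                return r->sw_consumer - r->producer;
        return r->sw_consumer + r->num_desc - r->producer;
}

int main(void)
{
        struct ring_sketch a = { .producer = 10,  .sw_consumer = 50, .num_desc = 256 };
        struct ring_sketch b = { .producer = 200, .sw_consumer = 40, .num_desc = 256 };

        printf("%u %u\n", tx_avail(&a), tx_avail(&b));  /* 40 and 96 */
        return 0;
}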
/Linux-v5.4/drivers/net/ethernet/chelsio/cxgb4/
cudbg_lib.h
244 entry->num_desc = txq->size; in cudbg_fill_qdesc_txq()
256 entry->num_desc = rxq->size; in cudbg_fill_qdesc_rxq()
268 entry->num_desc = flq->size; in cudbg_fill_qdesc_flq()
/Linux-v5.4/include/linux/soc/ti/
knav_qmss.h
79 int num_desc, int region_id);
/Linux-v5.4/drivers/dma/qcom/
bam_dma.c
65 u32 num_desc; member
642 async_desc->num_desc = num_alloc; in bam_prep_slave_sg()
834 async_desc->num_desc -= async_desc->xfer_len; in process_channel_irqs()
843 if (!async_desc->num_desc) { in process_channel_irqs()
934 for (i = 0; i < async_desc->num_desc; i++) in bam_tx_status()
1016 if (async_desc->num_desc > avail) in bam_start_dma()
1019 async_desc->xfer_len = async_desc->num_desc; in bam_start_dma()
1022 if (async_desc->num_desc == async_desc->xfer_len) in bam_start_dma()
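bam_dma.c pushes a transaction in chunks: xfer_len is capped at the free ring space, and num_desc counts down as chunks complete, so the transaction is finished only when it reaches zero. A minimal sketch with illustrative names.

/* Sketch of partial-submission accounting: cap the chunk at the free ring
 * space, then decrement the remaining descriptor count on completion. */
#include <stdint.h>
#include <stdio.h>

struct async_sketch {
        uint32_t num_desc;      /* descriptors still to be transferred */
        uint32_t xfer_len;      /* descriptors pushed in the current chunk */
};

static void start_chunk(struct async_sketch *d, uint32_t avail)
{
        d->xfer_len = (d->num_desc > avail) ? avail : d->num_desc;
}

/* returns 1 when the whole transaction has completed */
static int complete_chunk(struct async_sketch *d)
{
        d->num_desc -= d->xfer_len;
        return d->num_desc == 0;
}

int main(void)
{
        struct async_sketch d = { .num_desc = 10 };

        start_chunk(&d, 6);                       /* only 6 slots free */
        printf("done=%d left=%u\n", complete_chunk(&d), d.num_desc); /* 0, 4 */
        start_chunk(&d, 16);                      /* plenty of room now */
        printf("done=%d left=%u\n", complete_chunk(&d), d.num_desc); /* 1, 0 */
        return 0;
}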
/Linux-v5.4/drivers/net/ethernet/ti/
davinci_cpdma.c
85 int num_desc; member
226 pool->num_desc = pool->mem_size / pool->desc_size; in cpdma_desc_pool_create()
234 pool->num_desc = cpdma_params->descs_pool_size; in cpdma_desc_pool_create()
235 pool->mem_size = pool->desc_size * pool->num_desc; in cpdma_desc_pool_create()
301 struct cpdma_desc __iomem *desc, int num_desc) in cpdma_desc_free() argument
527 ctlr->num_tx_desc = ctlr->pool->num_desc / 2; in cpdma_ctlr_create()
528 ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc; in cpdma_ctlr_create()
1435 ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc; in cpdma_set_num_rx_descs()
1439 ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc; in cpdma_set_num_rx_descs()
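davinci_cpdma.c derives the pool size from the descriptor memory and splits it between the TX and RX sides so the two counts always sum to the pool size. A trivial sketch of that bookkeeping; the values are arbitrary.

/* Sketch of the pool split: size from memory, then a 50/50 split that is
 * later rebalanced while keeping tx + rx == pool size. */
#include <stdio.h>

int main(void)
{
        int mem_size = 8192, desc_size = 16;
        int num_desc = mem_size / desc_size;    /* 512 descriptors in the pool */

        int num_tx_desc = num_desc / 2;         /* initial 50/50 split */
        int num_rx_desc = num_desc - num_tx_desc;

        /* later, the RX share can change; TX gets whatever is left */
        num_rx_desc = 384;
        num_tx_desc = num_desc - num_rx_desc;

        printf("pool=%d tx=%d rx=%d\n", num_desc, num_tx_desc, num_rx_desc);
        return 0;
}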
/Linux-v5.4/drivers/mailbox/
bcm-pdc-mailbox.c
716 u32 num_desc; in pdc_tx_list_sg_add() local
721 num_desc = (u32)sg_nents(sg); in pdc_tx_list_sg_add()
726 if (unlikely(num_desc > tx_avail)) { in pdc_tx_list_sg_add()
883 u32 num_desc; in pdc_rx_list_sg_add() local
888 num_desc = (u32)sg_nents(sg); in pdc_rx_list_sg_add()
892 if (unlikely(num_desc > rx_avail)) { in pdc_rx_list_sg_add()
/Linux-v5.4/drivers/dma/
mic_x100_dma.c
147 int num_desc = len / max_xfer_size + 3; in mic_dma_prog_memcpy_desc() local
151 num_desc++; in mic_dma_prog_memcpy_desc()
153 ret = mic_dma_avail_desc_ring_space(ch, num_desc); in mic_dma_prog_memcpy_desc()
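mic_dma_prog_memcpy_desc() estimates the descriptors needed for a copy as len / max_xfer_size plus a fixed overhead of 3 (presumably status/housekeeping descriptors), adding one more when len is not a multiple of the chunk size, before checking ring space. A sketch of that count; the constants are arbitrary.

/* Sketch of the descriptor-count estimate: data descriptors for each full
 * chunk, one for a partial tail, plus the fixed overhead from the listing. */
#include <stddef.h>
#include <stdio.h>

static int memcpy_desc_count(size_t len, size_t max_xfer_size)
{
        int num_desc = (int)(len / max_xfer_size) + 3;

        if (len % max_xfer_size)        /* partial final descriptor */
                num_desc++;
        return num_desc;
}

int main(void)
{
        /* 1 MiB copy with 64 KiB per descriptor: 16 data + 3 overhead = 19 */
        printf("%d\n", memcpy_desc_count(1 << 20, 1 << 16));
        /* 100 KiB copy: 1 full + 1 partial + 3 overhead = 5 */
        printf("%d\n", memcpy_desc_count(100 * 1024, 1 << 16));
        return 0;
}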
