/Linux-v5.15/drivers/net/ethernet/intel/ice/

  ice_xsk.c
      20  static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)  in ice_qp_reset_stats() argument
      22  memset(&vsi->rx_rings[q_idx]->rx_stats, 0,  in ice_qp_reset_stats()
      23  sizeof(vsi->rx_rings[q_idx]->rx_stats));  in ice_qp_reset_stats()
      24  memset(&vsi->tx_rings[q_idx]->stats, 0,  in ice_qp_reset_stats()
      25  sizeof(vsi->tx_rings[q_idx]->stats));  in ice_qp_reset_stats()
      27  memset(&vsi->xdp_rings[q_idx]->stats, 0,  in ice_qp_reset_stats()
      28  sizeof(vsi->xdp_rings[q_idx]->stats));  in ice_qp_reset_stats()
      36  static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)  in ice_qp_clean_rings() argument
      38  ice_clean_tx_ring(vsi->tx_rings[q_idx]);  in ice_qp_clean_rings()
      40  ice_clean_tx_ring(vsi->xdp_rings[q_idx]);  in ice_qp_clean_rings()
      [all …]

  ice_lib.h
      15  int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
      17  int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx);

  ice_lib.c
    1705  int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)  in ice_vsi_cfg_single_rxq() argument
    1707  if (q_idx >= vsi->num_rxq)  in ice_vsi_cfg_single_rxq()
    1710  return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);  in ice_vsi_cfg_single_rxq()
    1713  int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx)  in ice_vsi_cfg_single_txq() argument
    1718  if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])  in ice_vsi_cfg_single_txq()
    1727  err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);  in ice_vsi_cfg_single_txq()
    1772  u16 q_idx = 0;  in ice_vsi_cfg_txqs() local
    1781  for (q_idx = 0; q_idx < count; q_idx++) {  in ice_vsi_cfg_txqs()
    1782  err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);  in ice_vsi_cfg_txqs()
    2062  u16 q_idx;  in ice_vsi_stop_tx_rings() local
      [all …]

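For context on the pattern above: ice_vsi_cfg_single_rxq()/ice_vsi_cfg_single_txq() validate q_idx against the VSI's queue count before touching rings[q_idx]. A minimal userspace sketch of that bounds-check-then-configure shape follows; the vsi/ring types and the -EINVAL return are illustrative stand-ins, not the driver's definitions.

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    /* Illustrative stand-ins for the driver's VSI/ring bookkeeping. */
    struct ring { int id; int configured; };
    struct vsi  { uint16_t num_rxq; struct ring *rx_rings; };

    /* Configure one RX queue, rejecting an out-of-range index first
     * (same shape as ice_vsi_cfg_single_rxq() in the listing above). */
    static int cfg_single_rxq(struct vsi *vsi, uint16_t q_idx)
    {
        if (q_idx >= vsi->num_rxq)
            return -EINVAL;

        vsi->rx_rings[q_idx].configured = 1;
        return 0;
    }

    int main(void)
    {
        struct ring rings[4] = { {0, 0}, {1, 0}, {2, 0}, {3, 0} };
        struct vsi vsi = { .num_rxq = 4, .rx_rings = rings };

        printf("cfg q 2 -> %d\n", cfg_single_rxq(&vsi, 2));  /* 0 */
        printf("cfg q 7 -> %d\n", cfg_single_rxq(&vsi, 7));  /* -EINVAL */
        return 0;
    }
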
/Linux-v5.15/drivers/infiniband/hw/hfi1/

  vnic_main.c
     124  u8 q_idx, struct sk_buff *skb, int err)  in hfi1_vnic_update_tx_counters() argument
     127  struct opa_vnic_stats *stats = &vinfo->stats[q_idx];  in hfi1_vnic_update_tx_counters()
     153  u8 q_idx, struct sk_buff *skb, int err)  in hfi1_vnic_update_rx_counters() argument
     156  struct opa_vnic_stats *stats = &vinfo->stats[q_idx];  in hfi1_vnic_update_rx_counters()
     205  u8 q_idx)  in hfi1_vnic_maybe_stop_tx() argument
     207  netif_stop_subqueue(vinfo->netdev, q_idx);  in hfi1_vnic_maybe_stop_tx()
     208  if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx))  in hfi1_vnic_maybe_stop_tx()
     211  netif_start_subqueue(vinfo->netdev, q_idx);  in hfi1_vnic_maybe_stop_tx()
     218  u8 pad_len, q_idx = skb->queue_mapping;  in hfi1_netdev_start_xmit() local
     225  v_dbg("xmit: queue %d skb len %d\n", q_idx, skb->len);  in hfi1_netdev_start_xmit()
     [all …]

  vnic_sdma.c
     125  int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,  in hfi1_vnic_send_dma() argument
     129  struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];  in hfi1_vnic_send_dma()
     223  if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))  in hfi1_vnic_sdma_wakeup()
     224  netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);  in hfi1_vnic_sdma_wakeup()
     228  u8 q_idx)  in hfi1_vnic_sdma_write_avail() argument
     230  struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];  in hfi1_vnic_sdma_write_avail()
     248  vnic_sdma->q_idx = i;  in hfi1_vnic_sdma_init()

  vnic.h
      49  u8 q_idx;  member
     113  u8 q_idx);
     122  int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,

  ipoib_tx.c
      69  netif_stop_subqueue(txq->priv->netdev, txq->q_idx);  in hfi1_ipoib_stop_txq()
      76  netif_wake_subqueue(txq->priv->netdev, txq->q_idx);  in hfi1_ipoib_wake_txq()
     137  le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,  in hfi1_ipoib_free_tx()
     220  dd_dev_dbg(priv->dd, "txq %d full.\n", txq->q_idx);  in hfi1_ipoib_add_tx()
     750  txq->q_idx = i;  in hfi1_ipoib_txreq_init()
     818  txq->q_idx,  in hfi1_ipoib_drain_tx_list()
     881  __netif_subqueue_stopped(dev, txq->q_idx),  in hfi1_ipoib_tx_timeout()

  ipoib.h
      91  u8 q_idx;  member

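hfi1_vnic_maybe_stop_tx() above uses a common netdev flow-control idiom: stop the subqueue first, then re-check for free descriptors, and immediately restart the subqueue if space is (or became) available, so a completion that races with the check cannot leave the queue stopped with no one to wake it. Below is a minimal single-threaded userspace model of that ordering; the stop/wake helpers and the descriptor counter are stand-ins for netif_stop_subqueue()/netif_start_subqueue() and the SDMA ring state, not the driver's code.

    #include <stdio.h>
    #include <stdbool.h>

    /* Stand-ins for netif_{stop,start}_subqueue() and the SDMA ring state. */
    struct subqueue { bool stopped; int free_descs; };

    static void stop_subqueue(struct subqueue *q)     { q->stopped = true;  }
    static void start_subqueue(struct subqueue *q)    { q->stopped = false; }
    static bool write_avail(const struct subqueue *q) { return q->free_descs > 0; }

    /* Same shape as hfi1_vnic_maybe_stop_tx(): stop first, then re-check,
     * and undo the stop if descriptors are available after all. */
    static void maybe_stop_tx(struct subqueue *q)
    {
        stop_subqueue(q);
        if (!write_avail(q))
            return;              /* stay stopped; the completion path wakes us later */
        start_subqueue(q);
    }

    int main(void)
    {
        struct subqueue q = { .stopped = false, .free_descs = 0 };

        maybe_stop_tx(&q);
        printf("no space : stopped=%d\n", q.stopped);   /* stays stopped */

        q.free_descs = 8;        /* pretend completions freed descriptors */
        maybe_stop_tx(&q);
        printf("has space: stopped=%d\n", q.stopped);   /* restarted */
        return 0;
    }
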
/Linux-v5.15/drivers/misc/habanalabs/common/

  hw_queue.c
     408  u32 q_idx;  in init_signal_cs() local
     411  q_idx = job->hw_queue_id;  in init_signal_cs()
     412  prop = &hdev->kernel_queues[q_idx].sync_stream_prop;  in init_signal_cs()
     420  cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx,  in init_signal_cs()
     429  rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1,  in init_signal_cs()
     465  u32 q_idx;  in init_wait_cs() local
     467  q_idx = job->hw_queue_id;  in init_wait_cs()
     468  prop = &hdev->kernel_queues[q_idx].sync_stream_prop;  in init_wait_cs()
     482  cs->encaps_sig_hdl->q_idx,  in init_wait_cs()
     518  prop->base_mon_id, q_idx, cs->sequence);  in init_wait_cs()
     [all …]

  command_submission.c
      56  hw_sob->q_idx, hw_sob->sob_id);  in hl_sob_reset_error()
    1555  int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,  in hl_cs_signal_sob_wraparound_handler() argument
    1563  prop = &hdev->kernel_queues[q_idx].sync_stream_prop;  in hl_cs_signal_sob_wraparound_handler()
    1586  q_idx);  in hl_cs_signal_sob_wraparound_handler()
    1627  prop->curr_sob_offset, q_idx);  in hl_cs_signal_sob_wraparound_handler()
    1695  enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)  in cs_ioctl_signal_wait_create_jobs() argument
    1731  job->hw_queue_id = q_idx;  in cs_ioctl_signal_wait_create_jobs()
    1759  u32 q_idx, u32 count,  in cs_ioctl_reserve_signals() argument
    1779  if (q_idx >= hdev->asic_prop.max_queues) {  in cs_ioctl_reserve_signals()
    1781  q_idx);  in cs_ioctl_reserve_signals()
    [all …]

/Linux-v5.15/drivers/net/hyperv/

  netvsc.c
     319  int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)  in netvsc_alloc_recv_comp_ring() argument
     321  struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];  in netvsc_alloc_recv_comp_ring()
     756  u16 q_idx = 0;  in netvsc_send_tx_complete() local
     777  q_idx = packet->q_idx;  in netvsc_send_tx_complete()
     779  tx_stats = &net_device->chan_table[q_idx].tx_stats;  in netvsc_send_tx_complete()
     790  atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);  in netvsc_send_tx_complete()
     796  struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);  in netvsc_send_tx_complete()
     961  &net_device->chan_table[packet->q_idx];  in netvsc_send_pkt()
     965  struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);  in netvsc_send_pkt()
    1091  nvchan = &net_device->chan_table[packet->q_idx];  in netvsc_send()
     [all …]

  netvsc_drv.c
     300  int q_idx;  in netvsc_get_tx_queue() local
     302  q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &  in netvsc_get_tx_queue()
     306  if (q_idx != old_idx &&  in netvsc_get_tx_queue()
     308  sk_tx_queue_set(sk, q_idx);  in netvsc_get_tx_queue()
     310  return q_idx;  in netvsc_get_tx_queue()
     326  int q_idx = sk_tx_queue_get(skb->sk);  in netvsc_pick_tx() local
     328  if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {  in netvsc_pick_tx()
     333  q_idx = skb_get_rx_queue(skb);  in netvsc_pick_tx()
     335  q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);  in netvsc_pick_tx()
     338  return q_idx;  in netvsc_pick_tx()
     [all …]

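netvsc_get_tx_queue()/netvsc_pick_tx() above map a flow hash through an indirection table (tx_table) and cache the chosen queue on the socket (sk_tx_queue_set/get) so later packets of the same flow reuse it. A small userspace sketch of that lookup-plus-cache is below; the hash value, table size, and the toy "flow" struct are invented for illustration and are not Hyper-V's definitions.

    #include <stdio.h>
    #include <stdint.h>

    #define TX_TABLE_SIZE 16          /* invented; power of two so & works as modulo */

    /* Toy per-flow state with a cached tx queue, standing in for sk_tx_queue_*(). */
    struct flow { uint32_t hash; int cached_q; };

    static int pick_tx_queue(const int *tx_table, struct flow *f, int real_num_tx_queues)
    {
        int q_idx = f->cached_q;

        /* Reuse the cached mapping while it is still in range. */
        if (q_idx >= 0 && q_idx < real_num_tx_queues)
            return q_idx;

        /* Otherwise: hash -> indirection table -> queue, then cache the result. */
        q_idx = tx_table[f->hash & (TX_TABLE_SIZE - 1)];
        f->cached_q = q_idx;
        return q_idx;
    }

    int main(void)
    {
        int tx_table[TX_TABLE_SIZE];
        for (int i = 0; i < TX_TABLE_SIZE; i++)
            tx_table[i] = i % 4;                  /* spread flows over 4 queues */

        struct flow f = { .hash = 0x9e3779b9u, .cached_q = -1 };
        printf("first lookup : q=%d\n", pick_tx_queue(tx_table, &f, 4));
        printf("cached lookup: q=%d\n", pick_tx_queue(tx_table, &f, 4));
        return 0;
    }
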
/Linux-v5.15/drivers/net/ethernet/cavium/thunder/

  nicvf_queues.h
     354  void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
     355  void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
     356  void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
     357  int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

  nicvf_queues.c
    1722  static u64 nicvf_int_type_to_mask(int int_type, int q_idx)  in nicvf_int_type_to_mask() argument
    1728  reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);  in nicvf_int_type_to_mask()
    1731  reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);  in nicvf_int_type_to_mask()
    1734  reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);  in nicvf_int_type_to_mask()
    1756  void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)  in nicvf_enable_intr() argument
    1758  u64 mask = nicvf_int_type_to_mask(int_type, q_idx);  in nicvf_enable_intr()
    1770  void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)  in nicvf_disable_intr() argument
    1772  u64 mask = nicvf_int_type_to_mask(int_type, q_idx);  in nicvf_disable_intr()
    1784  void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)  in nicvf_clear_intr() argument
    1786  u64 mask = nicvf_int_type_to_mask(int_type, q_idx);  in nicvf_clear_intr()
     [all …]

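nicvf_int_type_to_mask() above builds a per-queue bit inside a per-interrupt-type field of one wide register: set bit q_idx, then shift the whole thing to that type's field. A standalone sketch of the same arithmetic follows; the shift values and enum are made up for the example and are not the Thunder hardware's NICVF_INTR_*_SHIFT layout.

    #include <stdio.h>
    #include <stdint.h>

    /* Invented field offsets, for illustration only. */
    enum int_type { INT_CQ, INT_SQ, INT_RBDR };
    #define CQ_SHIFT    0
    #define SQ_SHIFT   16
    #define RBDR_SHIFT 32

    /* Bit q_idx of the field that belongs to this interrupt type. */
    static uint64_t int_type_to_mask(enum int_type type, int q_idx)
    {
        switch (type) {
        case INT_CQ:   return (1ULL << q_idx) << CQ_SHIFT;
        case INT_SQ:   return (1ULL << q_idx) << SQ_SHIFT;
        case INT_RBDR: return (1ULL << q_idx) << RBDR_SHIFT;
        }
        return 0;
    }

    int main(void)
    {
        /* Bit 3 of the SQ field: 1 << 3 = 0x8, shifted into bits 16.. -> 0x80000 */
        printf("SQ q3 mask = 0x%llx\n",
               (unsigned long long)int_type_to_mask(INT_SQ, 3));
        return 0;
    }
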
/Linux-v5.15/drivers/net/ethernet/intel/fm10k/

  fm10k_pf.c
     502  u16 vsi, queue, pc, q_idx;  in fm10k_configure_dglort_map_pf() local
     519  q_idx = dglort->queue_b;  in fm10k_configure_dglort_map_pf()
     523  for (queue = 0; queue < queue_count; queue++, q_idx++) {  in fm10k_configure_dglort_map_pf()
     524  if (q_idx >= FM10K_MAX_QUEUES)  in fm10k_configure_dglort_map_pf()
     527  fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort);  in fm10k_configure_dglort_map_pf()
     528  fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort);  in fm10k_configure_dglort_map_pf()
     538  q_idx = pc + dglort->queue_b;  in fm10k_configure_dglort_map_pf()
     540  if (q_idx >= FM10K_MAX_QUEUES)  in fm10k_configure_dglort_map_pf()
     543  txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx));  in fm10k_configure_dglort_map_pf()
     546  fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl);  in fm10k_configure_dglort_map_pf()
     [all …]

  fm10k_pci.c
    1176  int q_idx;  in fm10k_napi_enable_all() local
    1178  for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {  in fm10k_napi_enable_all()
    1179  q_vector = interface->q_vector[q_idx];  in fm10k_napi_enable_all()
    1873  int q_idx;  in fm10k_napi_disable_all() local
    1875  for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {  in fm10k_napi_disable_all()
    1876  q_vector = interface->q_vector[q_idx];  in fm10k_napi_disable_all()

/Linux-v5.15/drivers/net/ethernet/intel/iavf/

  iavf_main.c
     904  int q_idx;  in iavf_napi_enable_all() local
     908  for (q_idx = 0; q_idx < q_vectors; q_idx++) {  in iavf_napi_enable_all()
     911  q_vector = &adapter->q_vectors[q_idx];  in iavf_napi_enable_all()
     923  int q_idx;  in iavf_napi_disable_all() local
     927  for (q_idx = 0; q_idx < q_vectors; q_idx++) {  in iavf_napi_disable_all()
     928  q_vector = &adapter->q_vectors[q_idx];  in iavf_napi_disable_all()
    1365  int q_idx = 0, num_q_vectors;  in iavf_alloc_q_vectors() local
    1374  for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {  in iavf_alloc_q_vectors()
    1375  q_vector = &adapter->q_vectors[q_idx];  in iavf_alloc_q_vectors()
    1378  q_vector->v_idx = q_idx;  in iavf_alloc_q_vectors()
     [all …]

/Linux-v5.15/net/sched/

  sch_api.c
    1695  int ret = 0, q_idx = *q_idx_p;  in tc_dump_qdisc_root() local
    1703  if (q_idx < s_q_idx) {  in tc_dump_qdisc_root()
    1704  q_idx++;  in tc_dump_qdisc_root()
    1711  q_idx++;  in tc_dump_qdisc_root()
    1724  if (q_idx < s_q_idx) {  in tc_dump_qdisc_root()
    1725  q_idx++;  in tc_dump_qdisc_root()
    1733  q_idx++;  in tc_dump_qdisc_root()
    1737  *q_idx_p = q_idx;  in tc_dump_qdisc_root()
    1747  int idx, q_idx;  in tc_dump_qdisc() local
    1755  s_q_idx = q_idx = cb->args[1];  in tc_dump_qdisc()
     [all …]

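tc_dump_qdisc_root()/tc_dump_qdisc() above follow the standard netlink dump resumption pattern: cb->args[] records how far the previous pass got (s_q_idx), entries below that index are skipped, and the walk stops when the reply buffer fills, leaving q_idx behind as the next starting point. A userspace sketch of the same cursor logic over a plain array is below; the batch size and item type are arbitrary stand-ins for the netlink message buffer.

    #include <stdio.h>

    #define BATCH 3   /* arbitrary stand-in for "reply buffer is full" */

    /* Dump items[], skipping everything before *cursor and stopping after
     * BATCH items; *cursor records where the next call should resume. */
    static int dump_some(const int *items, int count, int *cursor)
    {
        int emitted = 0;

        for (int q_idx = 0; q_idx < count; q_idx++) {
            if (q_idx < *cursor)
                continue;              /* already dumped in a prior pass */
            if (emitted == BATCH) {
                *cursor = q_idx;       /* resume here next time */
                return 1;              /* more to do */
            }
            printf("item[%d] = %d\n", q_idx, items[q_idx]);
            emitted++;
        }
        *cursor = count;
        return 0;                      /* done */
    }

    int main(void)
    {
        int items[] = { 10, 11, 12, 13, 14, 15, 16 };
        int cursor = 0;

        while (dump_some(items, 7, &cursor))
            printf("-- batch boundary, cursor=%d --\n", cursor);
        return 0;
    }
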
/Linux-v5.15/drivers/scsi/mpi3mr/

  mpi3mr_fw.c
    1261  static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)  in mpi3mr_free_op_req_q_segments() argument
    1267  segments = mrioc->req_qinfo[q_idx].q_segments;  in mpi3mr_free_op_req_q_segments()
    1273  if (mrioc->req_qinfo[q_idx].q_segment_list) {  in mpi3mr_free_op_req_q_segments()
    1276  mrioc->req_qinfo[q_idx].q_segment_list,  in mpi3mr_free_op_req_q_segments()
    1277  mrioc->req_qinfo[q_idx].q_segment_list_dma);  in mpi3mr_free_op_req_q_segments()
    1278  mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;  in mpi3mr_free_op_req_q_segments()
    1281  size = mrioc->req_qinfo[q_idx].num_requests *  in mpi3mr_free_op_req_q_segments()
    1284  for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {  in mpi3mr_free_op_req_q_segments()
    1291  kfree(mrioc->req_qinfo[q_idx].q_segments);  in mpi3mr_free_op_req_q_segments()
    1292  mrioc->req_qinfo[q_idx].q_segments = NULL;  in mpi3mr_free_op_req_q_segments()
     [all …]

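mpi3mr_free_op_req_q_segments() above tears down one segmented request queue: release the segment-descriptor list, free the per-segment buffers, then free the segment bookkeeping array, clearing pointers along the way. A loose malloc/free userspace model of that teardown is below; the real code uses dma_free_coherent() and the driver's own structures, and the sizes here are arbitrary.

    #include <stdlib.h>
    #include <stdio.h>

    struct segment { void *vaddr; };
    struct req_q   { int num_segments; struct segment *q_segments; void *q_segment_list; };

    /* Loose model of the teardown above: segment list, then each data segment,
     * then the bookkeeping array, NULLing pointers so a repeated call is harmless. */
    static void free_req_q_segments(struct req_q *q)
    {
        if (!q->q_segments)
            return;

        if (q->q_segment_list) {
            free(q->q_segment_list);
            q->q_segment_list = NULL;
        }

        for (int j = 0; j < q->num_segments; j++) {
            free(q->q_segments[j].vaddr);
            q->q_segments[j].vaddr = NULL;
        }

        free(q->q_segments);
        q->q_segments = NULL;
        q->num_segments = 0;
    }

    int main(void)
    {
        struct req_q q = { .num_segments = 4 };

        q.q_segments = calloc(q.num_segments, sizeof(*q.q_segments));
        for (int j = 0; j < q.num_segments; j++)
            q.q_segments[j].vaddr = malloc(4096);
        q.q_segment_list = malloc(q.num_segments * sizeof(void *));

        free_req_q_segments(&q);
        free_req_q_segments(&q);        /* second call is a no-op */
        printf("teardown ok\n");
        return 0;
    }
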
/Linux-v5.15/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/

  trx.c
     534  u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)  in rtl92ee_get_available_desc() argument
     542  get_desc_addr_fr_q_idx(q_idx));  in rtl92ee_get_available_desc()
     889  u8 q_idx = *val;  in rtl92ee_set_desc() local
     900  struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx];  in rtl92ee_set_desc()
     903  if (q_idx == BEACON_QUEUE) {  in rtl92ee_set_desc()
     914  get_desc_addr_fr_q_idx(q_idx),  in rtl92ee_set_desc()

/Linux-v5.15/drivers/net/ethernet/intel/igc/

  igc_defines.h
     628  #define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4))  argument

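IGC_VLANPQF_QSEL() above places a queue index into a 4-bit field selected by _n, i.e. one nibble of the register per priority. A quick standalone check of that arithmetic (the macro body mirrors the definition listed; the priority/queue values are arbitrary):

    #include <stdio.h>

    /* Same expression as the igc macro: nibble _n of the register holds q_idx. */
    #define VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4))

    int main(void)
    {
        /* Priority 0 -> queue 2, priority 3 -> queue 1. */
        unsigned int reg = VLANPQF_QSEL(0, 2) | VLANPQF_QSEL(3, 1);
        printf("reg = 0x%08x\n", reg);   /* prints 0x00001002 */
        return 0;
    }
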
/Linux-v5.15/drivers/net/ethernet/qlogic/qed/

  qed_hw.h
     267  u8 q_idx;  member

/Linux-v5.15/drivers/net/ethernet/ti/

  cpsw.c
     906  int ret, q_idx;  in cpsw_ndo_start_xmit() local
     918  q_idx = skb_get_queue_mapping(skb);  in cpsw_ndo_start_xmit()
     919  if (q_idx >= cpsw->tx_ch_num)  in cpsw_ndo_start_xmit()
     920  q_idx = q_idx % cpsw->tx_ch_num;  in cpsw_ndo_start_xmit()
     922  txch = cpsw->txv[q_idx].ch;  in cpsw_ndo_start_xmit()
     923  txq = netdev_get_tx_queue(ndev, q_idx);  in cpsw_ndo_start_xmit()

  cpsw_new.c
     922  int ret, q_idx;  in cpsw_ndo_start_xmit() local
     934  q_idx = skb_get_queue_mapping(skb);  in cpsw_ndo_start_xmit()
     935  if (q_idx >= cpsw->tx_ch_num)  in cpsw_ndo_start_xmit()
     936  q_idx = q_idx % cpsw->tx_ch_num;  in cpsw_ndo_start_xmit()
     938  txch = cpsw->txv[q_idx].ch;  in cpsw_ndo_start_xmit()
     939  txq = netdev_get_tx_queue(ndev, q_idx);  in cpsw_ndo_start_xmit()

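Both cpsw_ndo_start_xmit() variants above fold the skb's queue mapping into the range of configured TX channels with a modulo. A trivial standalone version of that clamp (the function name and values are illustrative only):

    #include <stdio.h>

    /* Fold an arbitrary queue mapping into [0, tx_ch_num), as the cpsw xmit path does. */
    static int clamp_queue(int q_idx, int tx_ch_num)
    {
        if (q_idx >= tx_ch_num)
            q_idx = q_idx % tx_ch_num;
        return q_idx;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               clamp_queue(1, 4),    /* 1: already in range */
               clamp_queue(6, 4),    /* 2: folded by modulo */
               clamp_queue(4, 4));   /* 0: boundary case    */
        return 0;
    }
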
/Linux-v5.15/drivers/net/ethernet/intel/igb/

  e1000_defines.h
    1046  #define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4))  argument

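The igb macro is the same nibble-packing idea as the igc one earlier, but note that q_idx is not parenthesized in this definition, so passing an expression whose operators bind more loosely than << changes the result. A small demonstration; the two macro bodies below mirror the igb and igc definitions as listed, and the argument values are arbitrary.

    #include <stdio.h>

    /* As listed above: igb's version does not parenthesize q_idx, igc's does. */
    #define QSEL_IGB(_n, q_idx) (q_idx << ((_n) * 4))
    #define QSEL_IGC(_n, q_idx) ((q_idx) << ((_n) * 4))

    int main(void)
    {
        int base = 1;

        /* With a plain value both expand the same way. */
        printf("0x%x 0x%x\n", QSEL_IGB(1, 2), QSEL_IGC(1, 2));               /* 0x20 0x20 */

        /* With an expression argument the unparenthesized form binds differently:
         * base | 1 << 4 is base | (1 << 4), not (base | 1) << 4. */
        printf("0x%x 0x%x\n", QSEL_IGB(1, base | 1), QSEL_IGC(1, base | 1)); /* 0x11 0x10 */
        return 0;
    }
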