/Linux-v5.4/drivers/infiniband/hw/hfi1/ |
D | vnic_main.c |
    270 u8 q_idx, struct sk_buff *skb, int err) in hfi1_vnic_update_tx_counters() argument
    273 struct opa_vnic_stats *stats = &vinfo->stats[q_idx]; in hfi1_vnic_update_tx_counters()
    299 u8 q_idx, struct sk_buff *skb, int err) in hfi1_vnic_update_rx_counters() argument
    302 struct opa_vnic_stats *stats = &vinfo->stats[q_idx]; in hfi1_vnic_update_rx_counters()
    351 u8 q_idx) in hfi1_vnic_maybe_stop_tx() argument
    353 netif_stop_subqueue(vinfo->netdev, q_idx); in hfi1_vnic_maybe_stop_tx()
    354 if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx)) in hfi1_vnic_maybe_stop_tx()
    357 netif_start_subqueue(vinfo->netdev, q_idx); in hfi1_vnic_maybe_stop_tx()
    364 u8 pad_len, q_idx = skb->queue_mapping; in hfi1_netdev_start_xmit() local
    371 v_dbg("xmit: queue %d skb len %d\n", q_idx, skb->len); in hfi1_netdev_start_xmit()
    [all …]
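The hfi1_vnic_maybe_stop_tx() hits (lines 351–357) show the usual stop-then-recheck flow-control pattern: stop the subqueue first, re-check for descriptor space, and restart the subqueue only if space is in fact still available. A minimal sketch of that pattern, assuming a hypothetical ring_has_space() helper and struct my_ring rather than the hfi1 types:

```c
#include <linux/netdevice.h>

struct my_ring {			/* hypothetical per-queue ring state */
	unsigned int free_descs;
};

static bool ring_has_space(const struct my_ring *ring)
{
	return ring->free_descs > 0;	/* placeholder space check */
}

static void maybe_stop_tx(struct net_device *netdev, struct my_ring *ring, u8 q_idx)
{
	/* Stop first, then re-check: if the completion path freed space in
	 * between, restart the subqueue so transmit is not left stalled.
	 */
	netif_stop_subqueue(netdev, q_idx);
	if (!ring_has_space(ring))
		return;
	netif_start_subqueue(netdev, q_idx);
}
```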
|
D | vnic_sdma.c |
    167 int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx, in hfi1_vnic_send_dma() argument
    171 struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx]; in hfi1_vnic_send_dma()
    265 if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx)) in hfi1_vnic_sdma_wakeup()
    266 netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx); in hfi1_vnic_sdma_wakeup()
    270 u8 q_idx) in hfi1_vnic_sdma_write_avail() argument
    272 struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx]; in hfi1_vnic_sdma_write_avail()
    290 vnic_sdma->q_idx = i; in hfi1_vnic_sdma_init()
|
D | vnic.h |
    90 u8 q_idx; member
    156 u8 q_idx);
    165 int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
|
/Linux-v5.4/drivers/net/hyperv/ |
D | netvsc.c |
    281 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) in netvsc_alloc_recv_comp_ring() argument
    283 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; in netvsc_alloc_recv_comp_ring()
    679 u16 q_idx = 0; in netvsc_send_tx_complete() local
    691 q_idx = packet->q_idx; in netvsc_send_tx_complete()
    693 tx_stats = &net_device->chan_table[q_idx].tx_stats; in netvsc_send_tx_complete()
    704 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); in netvsc_send_tx_complete()
    710 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); in netvsc_send_tx_complete()
    814 &net_device->chan_table[packet->q_idx]; in netvsc_send_pkt()
    818 struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx); in netvsc_send_pkt()
    922 nvchan = &net_device->chan_table[packet->q_idx]; in netvsc_send()
    [all …]
|
D | netvsc_drv.c |
    301 int q_idx; in netvsc_get_tx_queue() local
    303 q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) & in netvsc_get_tx_queue()
    307 if (q_idx != old_idx && in netvsc_get_tx_queue()
    309 sk_tx_queue_set(sk, q_idx); in netvsc_get_tx_queue()
    311 return q_idx; in netvsc_get_tx_queue()
    327 int q_idx = sk_tx_queue_get(skb->sk); in netvsc_pick_tx() local
    329 if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) { in netvsc_pick_tx()
    334 q_idx = skb_get_rx_queue(skb); in netvsc_pick_tx()
    336 q_idx = netvsc_get_tx_queue(ndev, skb, q_idx); in netvsc_pick_tx()
    339 return q_idx; in netvsc_pick_tx()
    [all …]
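netvsc_pick_tx()/netvsc_get_tx_queue() select a transmit queue by hashing the skb into a per-device tx_table and caching the chosen index on the socket with sk_tx_queue_set(), so later packets of the same flow reuse it via sk_tx_queue_get(). A rough sketch of that hash-then-cache selection, with a hypothetical TX_TABLE_SIZE and tx_table (only the sock/skb helpers mirror real kernel APIs):

```c
#include <linux/netdevice.h>
#include <net/sock.h>

#define TX_TABLE_SIZE 16		/* assumed table size for illustration */
static u16 tx_table[TX_TABLE_SIZE];	/* hypothetical hash -> queue mapping */

static u16 pick_tx_queue(struct net_device *ndev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int q_idx = sk ? sk_tx_queue_get(sk) : -1;

	/* Re-hash only if there is no cached queue or it is no longer valid. */
	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		q_idx = tx_table[skb_get_hash(skb) % TX_TABLE_SIZE];
		if (sk && sk_fullsock(sk))
			sk_tx_queue_set(sk, q_idx);	/* cache for the next packet */
	}

	return q_idx;
}
```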
|
D | hyperv_net.h |
    127 u16 q_idx; member
    187 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
|
/Linux-v5.4/drivers/net/ethernet/cavium/thunder/ |
D | nicvf_queues.h |
    354 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
    355 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
    356 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
    357 int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
|
D | nicvf_queues.c |
    1722 static u64 nicvf_int_type_to_mask(int int_type, int q_idx) in nicvf_int_type_to_mask() argument
    1728 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); in nicvf_int_type_to_mask()
    1731 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); in nicvf_int_type_to_mask()
    1734 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); in nicvf_int_type_to_mask()
    1756 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_enable_intr() argument
    1758 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_enable_intr()
    1770 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_disable_intr() argument
    1772 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_disable_intr()
    1784 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_clear_intr() argument
    1786 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_clear_intr()
    [all …]
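nicvf_int_type_to_mask() builds the per-queue interrupt bit by shifting 1ULL by the queue index and then by a per-interrupt-type field offset; the enable/disable/clear helpers just apply that mask to the interrupt registers. A compact sketch of the mask construction, with assumed shift values rather than the real ThunderX register layout:

```c
#include <linux/types.h>

#define INTR_CQ_SHIFT	0	/* assumed: completion-queue bits start at 0 */
#define INTR_SQ_SHIFT	8	/* assumed: send-queue bits start at 8       */
#define INTR_RBDR_SHIFT	16	/* assumed: RBDR bits start at 16            */

enum intr_type { INTR_CQ, INTR_SQ, INTR_RBDR };

static u64 int_type_to_mask(enum intr_type type, int q_idx)
{
	switch (type) {
	case INTR_CQ:
		return (1ULL << q_idx) << INTR_CQ_SHIFT;
	case INTR_SQ:
		return (1ULL << q_idx) << INTR_SQ_SHIFT;
	case INTR_RBDR:
		return (1ULL << q_idx) << INTR_RBDR_SHIFT;
	}

	return 0;
}
```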
|
/Linux-v5.4/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_pf.c |
    502 u16 vsi, queue, pc, q_idx; in fm10k_configure_dglort_map_pf() local
    519 q_idx = dglort->queue_b; in fm10k_configure_dglort_map_pf()
    523 for (queue = 0; queue < queue_count; queue++, q_idx++) { in fm10k_configure_dglort_map_pf()
    524 if (q_idx >= FM10K_MAX_QUEUES) in fm10k_configure_dglort_map_pf()
    527 fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort); in fm10k_configure_dglort_map_pf()
    528 fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort); in fm10k_configure_dglort_map_pf()
    538 q_idx = pc + dglort->queue_b; in fm10k_configure_dglort_map_pf()
    540 if (q_idx >= FM10K_MAX_QUEUES) in fm10k_configure_dglort_map_pf()
    543 txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx)); in fm10k_configure_dglort_map_pf()
    546 fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl); in fm10k_configure_dglort_map_pf()
    [all …]
|
D | fm10k_pci.c |
    1175 int q_idx; in fm10k_napi_enable_all() local
    1177 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { in fm10k_napi_enable_all()
    1178 q_vector = interface->q_vector[q_idx]; in fm10k_napi_enable_all()
    1872 int q_idx; in fm10k_napi_disable_all() local
    1874 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { in fm10k_napi_disable_all()
    1875 q_vector = interface->q_vector[q_idx]; in fm10k_napi_disable_all()
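fm10k_napi_enable_all()/fm10k_napi_disable_all() (and the matching loops in iavf_main.c and ixgbevf_main.c further down) simply walk the driver's q_vector array by q_idx and call napi_enable()/napi_disable() on each vector's NAPI instance. A minimal sketch of that loop with a stand-in adapter structure, not the Intel driver types:

```c
#include <linux/netdevice.h>

struct my_q_vector {			/* hypothetical per-vector state */
	struct napi_struct napi;
};

struct my_adapter {			/* hypothetical adapter container */
	int num_q_vectors;
	struct my_q_vector *q_vector[64];
};

static void my_napi_enable_all(struct my_adapter *adapter)
{
	int q_idx;

	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
		napi_enable(&adapter->q_vector[q_idx]->napi);
}

static void my_napi_disable_all(struct my_adapter *adapter)
{
	int q_idx;

	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
		napi_disable(&adapter->q_vector[q_idx]->napi);
}
```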
|
D | fm10k_main.c |
    1885 int q_idx = pc; in fm10k_cache_ring_qos() local
    1888 interface->tx_ring[offset + i]->reg_idx = q_idx; in fm10k_cache_ring_qos()
    1890 interface->rx_ring[offset + i]->reg_idx = q_idx; in fm10k_cache_ring_qos()
    1892 q_idx += pc_stride; in fm10k_cache_ring_qos()
|
/Linux-v5.4/drivers/net/ethernet/intel/iavf/ |
D | iavf_main.c |
    893 int q_idx; in iavf_napi_enable_all() local
    897 for (q_idx = 0; q_idx < q_vectors; q_idx++) { in iavf_napi_enable_all()
    900 q_vector = &adapter->q_vectors[q_idx]; in iavf_napi_enable_all()
    912 int q_idx; in iavf_napi_disable_all() local
    916 for (q_idx = 0; q_idx < q_vectors; q_idx++) { in iavf_napi_disable_all()
    917 q_vector = &adapter->q_vectors[q_idx]; in iavf_napi_disable_all()
    1337 int q_idx = 0, num_q_vectors; in iavf_alloc_q_vectors() local
    1346 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { in iavf_alloc_q_vectors()
    1347 q_vector = &adapter->q_vectors[q_idx]; in iavf_alloc_q_vectors()
    1350 q_vector->v_idx = q_idx; in iavf_alloc_q_vectors()
    [all …]
|
/Linux-v5.4/drivers/net/ethernet/intel/igc/ |
D | igc_defines.h | 396 #define IGC_VLAPQF_QUEUE_SEL(_n, q_idx) ((q_idx) << ((_n) * 4)) argument
|
/Linux-v5.4/net/sched/ |
D | sch_api.c |
    1678 int ret = 0, q_idx = *q_idx_p; in tc_dump_qdisc_root() local
    1686 if (q_idx < s_q_idx) { in tc_dump_qdisc_root()
    1687 q_idx++; in tc_dump_qdisc_root()
    1694 q_idx++; in tc_dump_qdisc_root()
    1707 if (q_idx < s_q_idx) { in tc_dump_qdisc_root()
    1708 q_idx++; in tc_dump_qdisc_root()
    1716 q_idx++; in tc_dump_qdisc_root()
    1720 *q_idx_p = q_idx; in tc_dump_qdisc_root()
    1730 int idx, q_idx; in tc_dump_qdisc() local
    1738 s_q_idx = q_idx = cb->args[1]; in tc_dump_qdisc()
    [all …]
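The tc_dump_qdisc_root() hits show the standard netlink dump continuation idiom: q_idx counts entries, entries below the saved s_q_idx (restored from cb->args[1]) are skipped because an earlier pass already sent them, and the final index is written back so the next dump call resumes where this one stopped. A rough sketch of the idiom, with a hypothetical dump_one() emitter and NUM_ITEMS count:

```c
#include <linux/netlink.h>

#define NUM_ITEMS 8			/* assumed number of entries to dump */

static int dump_one(int idx)		/* hypothetical per-entry emitter */
{
	return 0;
}

static int dump_items(struct netlink_callback *cb)
{
	int q_idx, s_q_idx = cb->args[1];

	for (q_idx = 0; q_idx < NUM_ITEMS; q_idx++) {
		if (q_idx < s_q_idx)
			continue;	/* already sent in a previous pass */
		if (dump_one(q_idx) < 0)
			break;		/* skb full: resume here next time */
	}

	cb->args[1] = q_idx;		/* remember progress for the next call */
	return 0;
}
```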
|
/Linux-v5.4/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/ |
D | trx.c |
    534 u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) in rtl92ee_get_available_desc() argument
    542 get_desc_addr_fr_q_idx(q_idx)); in rtl92ee_get_available_desc()
    898 u8 q_idx = *val; in rtl92ee_set_desc() local
    909 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx]; in rtl92ee_set_desc()
    912 if (q_idx == BEACON_QUEUE) { in rtl92ee_set_desc()
    923 get_desc_addr_fr_q_idx(q_idx), in rtl92ee_set_desc()
|
/Linux-v5.4/drivers/net/ethernet/qlogic/qed/ |
D | qed_hw.h | 293 u8 q_idx; member
|
/Linux-v5.4/drivers/net/ethernet/intel/igb/ |
D | e1000_defines.h | 1046 #define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4)) argument
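The igb macro above, like the igc variant earlier in this listing, packs a 4-bit receive-queue selector into the VLAPQF register, one nibble per index _n. A small illustration of how such a nibble-packing macro composes a register value; the queue assignments and the build_vlapqf() helper are made up for illustration:

```c
#include <linux/types.h>

#define VLAPQF_QUEUE_SEL(_n, q_idx)	((u32)(q_idx) << ((_n) * 4))

static u32 build_vlapqf(void)
{
	u32 vlapqf = 0;

	vlapqf |= VLAPQF_QUEUE_SEL(0, 2);	/* index 0 -> queue 2 */
	vlapqf |= VLAPQF_QUEUE_SEL(1, 5);	/* index 1 -> queue 5 */

	return vlapqf;
}
```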
|
/Linux-v5.4/drivers/net/ethernet/cavium/liquidio/ |
D | lio_vf_main.c |
    1412 int q_idx = 0, iq_no = 0; in liquidio_xmit() local
    1424 q_idx = skb_iq(lio->oct_dev, skb); in liquidio_xmit()
    1425 tag = q_idx; in liquidio_xmit()
    1426 iq_no = lio->linfo.txpciq[q_idx].s.q_no; in liquidio_xmit()
    1504 spin_lock(&lio->glist_lock[q_idx]); in liquidio_xmit()
    1506 lio_list_delete_head(&lio->glist[q_idx]); in liquidio_xmit()
    1507 spin_unlock(&lio->glist_lock[q_idx]); in liquidio_xmit()
    1601 netif_stop_subqueue(netdev, q_idx); in liquidio_xmit()
|
D | lio_main.c |
    2331 int q_idx = 0, iq_no = 0; in liquidio_xmit() local
    2339 q_idx = skb_iq(oct, skb); in liquidio_xmit()
    2340 tag = q_idx; in liquidio_xmit()
    2341 iq_no = lio->linfo.txpciq[q_idx].s.q_no; in liquidio_xmit()
    2430 spin_lock(&lio->glist_lock[q_idx]); in liquidio_xmit()
    2432 lio_list_delete_head(&lio->glist[q_idx]); in liquidio_xmit()
    2433 spin_unlock(&lio->glist_lock[q_idx]); in liquidio_xmit()
    2536 netif_stop_subqueue(netdev, q_idx); in liquidio_xmit()
|
/Linux-v5.4/drivers/net/wireless/mediatek/mt76/mt7615/ |
D | mac.c |
    316 u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; in mt7615_mac_write_txwi() local
    338 q_idx = wmm_idx * MT7615_MAX_WMM_SETS + in mt7615_mac_write_txwi()
    342 q_idx = MT_LMAC_BCN0; in mt7615_mac_write_txwi()
    345 q_idx = MT_LMAC_ALTX0; in mt7615_mac_write_txwi()
    351 FIELD_PREP(MT_TXD0_Q_IDX, q_idx); in mt7615_mac_write_txwi()
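mt7615_mac_write_txwi() (and __mt7615_mcu_msg_send() below) pack q_idx into the first TX descriptor word with FIELD_PREP(MT_TXD0_Q_IDX, q_idx). A minimal FIELD_PREP() example in the same spirit; the TXD0_Q_IDX mask position is assumed for illustration, not the real mt7615 definition:

```c
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define TXD0_Q_IDX	GENMASK(31, 25)	/* assumed position of the queue field */

static u32 build_txd0(u16 tx_bytes, u8 q_idx)
{
	/* FIELD_PREP() shifts q_idx into the bit range named by the mask. */
	return tx_bytes | FIELD_PREP(TXD0_Q_IDX, q_idx);
}
```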
|
D | mcu.c |
    55 u8 seq, q_idx, pkt_fmt; in __mt7615_mcu_msg_send() local
    69 q_idx = MT_TX_MCU_PORT_RX_Q0; in __mt7615_mcu_msg_send()
    72 q_idx = MT_TX_MCU_PORT_RX_FWDL; in __mt7615_mcu_msg_send()
    80 FIELD_PREP(MT_TXD0_Q_IDX, q_idx); in __mt7615_mcu_msg_send()
    89 mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, q_idx)); in __mt7615_mcu_msg_send()
|
/Linux-v5.4/drivers/net/ethernet/intel/ice/ |
D | ice_lib.c |
    1784 u16 q_idx = 0, i; in ice_vsi_cfg_txqs() local
    1800 err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset, in ice_vsi_cfg_txqs()
    1805 q_idx++; in ice_vsi_cfg_txqs()
    2250 u16 i, q_idx = 0; in ice_vsi_stop_tx_rings() local
    2265 if (!rings || !rings[q_idx]) in ice_vsi_stop_tx_rings()
    2268 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); in ice_vsi_stop_tx_rings()
    2271 rings[q_idx], &txq_meta); in ice_vsi_stop_tx_rings()
    2276 q_idx++; in ice_vsi_stop_tx_rings()
|
/Linux-v5.4/drivers/net/ethernet/broadcom/bnx2x/ |
D | bnx2x_sriov.h |
    437 u16 q_idx, u16 sb_idx);
    442 u16 q_idx, u16 sb_idx);
|
/Linux-v5.4/drivers/net/ethernet/intel/ixgbevf/ |
D | ixgbevf_main.c |
    2146 int q_idx; in ixgbevf_napi_enable_all() local
    2150 for (q_idx = 0; q_idx < q_vectors; q_idx++) { in ixgbevf_napi_enable_all()
    2151 q_vector = adapter->q_vector[q_idx]; in ixgbevf_napi_enable_all()
    2158 int q_idx; in ixgbevf_napi_disable_all() local
    2162 for (q_idx = 0; q_idx < q_vectors; q_idx++) { in ixgbevf_napi_disable_all()
    2163 q_vector = adapter->q_vector[q_idx]; in ixgbevf_napi_disable_all()
|
/Linux-v5.4/drivers/net/ethernet/ti/ |
D | cpsw.c |
    1803 int ret, q_idx; in cpsw_ndo_start_xmit() local
    1815 q_idx = skb_get_queue_mapping(skb); in cpsw_ndo_start_xmit()
    1816 if (q_idx >= cpsw->tx_ch_num) in cpsw_ndo_start_xmit()
    1817 q_idx = q_idx % cpsw->tx_ch_num; in cpsw_ndo_start_xmit()
    1819 txch = cpsw->txv[q_idx].ch; in cpsw_ndo_start_xmit()
    1820 txq = netdev_get_tx_queue(ndev, q_idx); in cpsw_ndo_start_xmit()
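cpsw_ndo_start_xmit() takes the skb's queue_mapping and folds it back into the number of TX channels the driver actually opened before looking up the channel and txq. A small sketch of that clamp, with the channel count passed in as a parameter rather than read from the real cpsw structures:

```c
#include <linux/skbuff.h>

static int pick_tx_channel(struct sk_buff *skb, int tx_ch_num)
{
	int q_idx = skb_get_queue_mapping(skb);

	/* The stack may hand us a queue index beyond the channels we opened;
	 * fold it back into range instead of dropping the packet.
	 */
	if (q_idx >= tx_ch_num)
		q_idx = q_idx % tx_ch_num;

	return q_idx;
}
```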
|