Home
last modified time | relevance | path

Searched refs: q_idx (Results 1 – 25 of 34) sorted by relevance

12

/Linux-v4.19/drivers/infiniband/hw/hfi1/
Dvnic_main.c268 u8 q_idx, struct sk_buff *skb, int err) in hfi1_vnic_update_tx_counters() argument
271 struct opa_vnic_stats *stats = &vinfo->stats[q_idx]; in hfi1_vnic_update_tx_counters()
297 u8 q_idx, struct sk_buff *skb, int err) in hfi1_vnic_update_rx_counters() argument
300 struct opa_vnic_stats *stats = &vinfo->stats[q_idx]; in hfi1_vnic_update_rx_counters()
349 u8 q_idx) in hfi1_vnic_maybe_stop_tx() argument
351 netif_stop_subqueue(vinfo->netdev, q_idx); in hfi1_vnic_maybe_stop_tx()
352 if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx)) in hfi1_vnic_maybe_stop_tx()
355 netif_start_subqueue(vinfo->netdev, q_idx); in hfi1_vnic_maybe_stop_tx()
362 u8 pad_len, q_idx = skb->queue_mapping; in hfi1_netdev_start_xmit() local
369 v_dbg("xmit: queue %d skb len %d\n", q_idx, skb->len); in hfi1_netdev_start_xmit()
[all …]
Dvnic_sdma.c171 int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx, in hfi1_vnic_send_dma() argument
175 struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx]; in hfi1_vnic_send_dma()
269 if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx)) in hfi1_vnic_sdma_wakeup()
270 netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx); in hfi1_vnic_sdma_wakeup()
274 u8 q_idx) in hfi1_vnic_sdma_write_avail() argument
276 struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx]; in hfi1_vnic_sdma_write_avail()
293 vnic_sdma->q_idx = i; in hfi1_vnic_sdma_init()
Dvnic.h90 u8 q_idx; member
156 u8 q_idx);
165 int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
/Linux-v4.19/drivers/net/hyperv/
Dnetvsc.c291 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) in netvsc_alloc_recv_comp_ring() argument
293 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; in netvsc_alloc_recv_comp_ring()
686 u16 q_idx = 0; in netvsc_send_tx_complete() local
698 q_idx = packet->q_idx; in netvsc_send_tx_complete()
700 tx_stats = &net_device->chan_table[q_idx].tx_stats; in netvsc_send_tx_complete()
711 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); in netvsc_send_tx_complete()
717 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); in netvsc_send_tx_complete()
821 &net_device->chan_table[packet->q_idx]; in netvsc_send_pkt()
825 struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx); in netvsc_send_pkt()
925 nvchan = &net_device->chan_table[packet->q_idx]; in netvsc_send()
[all …]
Dnetvsc_drv.c291 int q_idx; in netvsc_get_tx_queue() local
293 q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) & in netvsc_get_tx_queue()
297 if (q_idx != old_idx && in netvsc_get_tx_queue()
299 sk_tx_queue_set(sk, q_idx); in netvsc_get_tx_queue()
301 return q_idx; in netvsc_get_tx_queue()
317 int q_idx = sk_tx_queue_get(skb->sk); in netvsc_pick_tx() local
319 if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) { in netvsc_pick_tx()
324 q_idx = skb_get_rx_queue(skb); in netvsc_pick_tx()
326 q_idx = netvsc_get_tx_queue(ndev, skb, q_idx); in netvsc_pick_tx()
329 return q_idx; in netvsc_pick_tx()
[all …]
Dhyperv_net.h139 u16 q_idx; member
195 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
/Linux-v4.19/drivers/net/ethernet/cavium/thunder/
Dnicvf_queues.h357 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
358 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
359 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
360 int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
Dnicvf_queues.c1727 static u64 nicvf_int_type_to_mask(int int_type, int q_idx) in nicvf_int_type_to_mask() argument
1733 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); in nicvf_int_type_to_mask()
1736 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); in nicvf_int_type_to_mask()
1739 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); in nicvf_int_type_to_mask()
1761 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_enable_intr() argument
1763 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_enable_intr()
1775 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_disable_intr() argument
1777 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_disable_intr()
1789 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_clear_intr() argument
1791 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_clear_intr()
[all …]
/Linux-v4.19/drivers/net/ethernet/intel/fm10k/
Dfm10k_pf.c502 u16 vsi, queue, pc, q_idx; in fm10k_configure_dglort_map_pf() local
519 q_idx = dglort->queue_b; in fm10k_configure_dglort_map_pf()
523 for (queue = 0; queue < queue_count; queue++, q_idx++) { in fm10k_configure_dglort_map_pf()
524 if (q_idx >= FM10K_MAX_QUEUES) in fm10k_configure_dglort_map_pf()
527 fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort); in fm10k_configure_dglort_map_pf()
528 fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort); in fm10k_configure_dglort_map_pf()
538 q_idx = pc + dglort->queue_b; in fm10k_configure_dglort_map_pf()
540 if (q_idx >= FM10K_MAX_QUEUES) in fm10k_configure_dglort_map_pf()
543 txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx)); in fm10k_configure_dglort_map_pf()
546 fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl); in fm10k_configure_dglort_map_pf()
[all …]
Dfm10k_pci.c1172 int q_idx; in fm10k_napi_enable_all() local
1174 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { in fm10k_napi_enable_all()
1175 q_vector = interface->q_vector[q_idx]; in fm10k_napi_enable_all()
1870 int q_idx; in fm10k_napi_disable_all() local
1872 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { in fm10k_napi_disable_all()
1873 q_vector = interface->q_vector[q_idx]; in fm10k_napi_disable_all()
Dfm10k_main.c1877 int pc, offset, rss_i, i, q_idx; in fm10k_cache_ring_qos() local
1887 q_idx = pc; in fm10k_cache_ring_qos()
1889 interface->tx_ring[offset + i]->reg_idx = q_idx; in fm10k_cache_ring_qos()
1891 interface->rx_ring[offset + i]->reg_idx = q_idx; in fm10k_cache_ring_qos()
1893 q_idx += pc_stride; in fm10k_cache_ring_qos()
/Linux-v4.19/drivers/net/ethernet/intel/i40evf/
Di40evf_main.c917 int q_idx; in i40evf_napi_enable_all() local
921 for (q_idx = 0; q_idx < q_vectors; q_idx++) { in i40evf_napi_enable_all()
924 q_vector = &adapter->q_vectors[q_idx]; in i40evf_napi_enable_all()
936 int q_idx; in i40evf_napi_disable_all() local
940 for (q_idx = 0; q_idx < q_vectors; q_idx++) { in i40evf_napi_disable_all()
941 q_vector = &adapter->q_vectors[q_idx]; in i40evf_napi_disable_all()
1362 int q_idx = 0, num_q_vectors; in i40evf_alloc_q_vectors() local
1371 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { in i40evf_alloc_q_vectors()
1372 q_vector = &adapter->q_vectors[q_idx]; in i40evf_alloc_q_vectors()
1375 q_vector->v_idx = q_idx; in i40evf_alloc_q_vectors()
[all …]
/Linux-v4.19/net/sched/
Dsch_api.c1597 int ret = 0, q_idx = *q_idx_p; in tc_dump_qdisc_root() local
1605 if (q_idx < s_q_idx) { in tc_dump_qdisc_root()
1606 q_idx++; in tc_dump_qdisc_root()
1613 q_idx++; in tc_dump_qdisc_root()
1626 if (q_idx < s_q_idx) { in tc_dump_qdisc_root()
1627 q_idx++; in tc_dump_qdisc_root()
1635 q_idx++; in tc_dump_qdisc_root()
1639 *q_idx_p = q_idx; in tc_dump_qdisc_root()
1649 int idx, q_idx; in tc_dump_qdisc() local
1657 s_q_idx = q_idx = cb->args[1]; in tc_dump_qdisc()
[all …]
/Linux-v4.19/drivers/staging/rtlwifi/rtl8822be/
Dtrx.c401 u16 rtl8822be_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) in rtl8822be_get_available_desc() argument
404 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx]; in rtl8822be_get_available_desc()
837 u8 q_idx = *val; in rtl8822be_set_desc() local
844 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx]; in rtl8822be_set_desc()
847 if (q_idx == BEACON_QUEUE) { in rtl8822be_set_desc()
863 q_idx), in rtl8822be_set_desc()
/Linux-v4.19/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/
Dtrx.c550 u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) in rtl92ee_get_available_desc() argument
558 get_desc_addr_fr_q_idx(q_idx)); in rtl92ee_get_available_desc()
911 u8 q_idx = *val; in rtl92ee_set_desc() local
921 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx]; in rtl92ee_set_desc()
924 if (q_idx == BEACON_QUEUE) { in rtl92ee_set_desc()
935 get_desc_addr_fr_q_idx(q_idx), in rtl92ee_set_desc()
/Linux-v4.19/drivers/uio/
Duio_hv_generic.c133 u16 q_idx = channel->offermsg.offer.sub_channel_index; in hv_uio_ring_mmap() local
136 q_idx, vma_pages(vma), vma->vm_pgoff); in hv_uio_ring_mmap()
/Linux-v4.19/drivers/net/ethernet/qlogic/qed/
Dqed_hw.h293 u8 q_idx; member
/Linux-v4.19/drivers/net/ethernet/intel/igb/
De1000_defines.h1043 #define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4)) argument
/Linux-v4.19/drivers/net/ethernet/cavium/liquidio/
Dlio_vf_main.c1401 int q_idx = 0, iq_no = 0; in liquidio_xmit() local
1413 q_idx = skb_iq(lio->oct_dev, skb); in liquidio_xmit()
1414 tag = q_idx; in liquidio_xmit()
1415 iq_no = lio->linfo.txpciq[q_idx].s.q_no; in liquidio_xmit()
1493 spin_lock(&lio->glist_lock[q_idx]); in liquidio_xmit()
1495 lio_list_delete_head(&lio->glist[q_idx]); in liquidio_xmit()
1496 spin_unlock(&lio->glist_lock[q_idx]); in liquidio_xmit()
1591 netif_stop_subqueue(netdev, q_idx); in liquidio_xmit()
Dlio_main.c2338 int q_idx = 0, iq_no = 0; in liquidio_xmit() local
2346 q_idx = skb_iq(oct, skb); in liquidio_xmit()
2347 tag = q_idx; in liquidio_xmit()
2348 iq_no = lio->linfo.txpciq[q_idx].s.q_no; in liquidio_xmit()
2437 spin_lock(&lio->glist_lock[q_idx]); in liquidio_xmit()
2439 lio_list_delete_head(&lio->glist[q_idx]); in liquidio_xmit()
2440 spin_unlock(&lio->glist_lock[q_idx]); in liquidio_xmit()
2544 netif_stop_subqueue(netdev, q_idx); in liquidio_xmit()
/Linux-v4.19/drivers/net/ethernet/broadcom/bnx2x/
Dbnx2x_sriov.h435 u16 q_idx, u16 sb_idx);
440 u16 q_idx, u16 sb_idx);
Dbnx2x_sriov.c131 u16 q_idx, u16 sb_idx) in bnx2x_vfop_qctor_dump_tx() argument
136 q_idx, in bnx2x_vfop_qctor_dump_tx()
147 u16 q_idx, u16 sb_idx) in bnx2x_vfop_qctor_dump_rx() argument
154 q_idx, in bnx2x_vfop_qctor_dump_rx()
/Linux-v4.19/drivers/net/ethernet/intel/ixgbevf/
Dixgbevf_main.c2131 int q_idx; in ixgbevf_napi_enable_all() local
2135 for (q_idx = 0; q_idx < q_vectors; q_idx++) { in ixgbevf_napi_enable_all()
2136 q_vector = adapter->q_vector[q_idx]; in ixgbevf_napi_enable_all()
2143 int q_idx; in ixgbevf_napi_disable_all() local
2147 for (q_idx = 0; q_idx < q_vectors; q_idx++) { in ixgbevf_napi_disable_all()
2148 q_vector = adapter->q_vector[q_idx]; in ixgbevf_napi_disable_all()
/Linux-v4.19/drivers/net/ethernet/intel/ice/
Dice_main.c4331 int q_idx; in ice_napi_enable_all() local
4336 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) in ice_napi_enable_all()
4337 napi_enable(&vsi->q_vectors[q_idx]->napi); in ice_napi_enable_all()
4815 int q_idx; in ice_napi_disable_all() local
4820 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) in ice_napi_disable_all()
4821 napi_disable(&vsi->q_vectors[q_idx]->napi); in ice_napi_disable_all()
/Linux-v4.19/drivers/net/ethernet/ti/
Dcpsw.c1988 int ret, q_idx; in cpsw_ndo_start_xmit() local
2000 q_idx = skb_get_queue_mapping(skb); in cpsw_ndo_start_xmit()
2001 if (q_idx >= cpsw->tx_ch_num) in cpsw_ndo_start_xmit()
2002 q_idx = q_idx % cpsw->tx_ch_num; in cpsw_ndo_start_xmit()
2004 txch = cpsw->txv[q_idx].ch; in cpsw_ndo_start_xmit()
2005 txq = netdev_get_tx_queue(ndev, q_idx); in cpsw_ndo_start_xmit()

12