Lines matching refs: q_vector
319 skb = napi_alloc_skb(&rx_ring->q_vector->napi, in fm10k_fetch_rx_buffer()
556 static void fm10k_receive_skb(struct fm10k_q_vector *q_vector, in fm10k_receive_skb() argument
559 napi_gro_receive(&q_vector->napi, skb); in fm10k_receive_skb()
562 static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, in fm10k_clean_rx_irq() argument
612 fm10k_receive_skb(q_vector, skb); in fm10k_clean_rx_irq()
628 q_vector->rx.total_packets += total_packets; in fm10k_clean_rx_irq()
629 q_vector->rx.total_bytes += total_bytes; in fm10k_clean_rx_irq()
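
The references above cover the RX completion path: fm10k_fetch_rx_buffer() builds an skb against the ring's NAPI context, fm10k_receive_skb() hands it to GRO, and fm10k_clean_rx_irq() folds the results into the vector's RX statistics. A minimal sketch of that hand-off, assuming the fm10k_q_vector/fm10k_ring layout implied by the listing; sketch_fetch_rx_buffer() is a hypothetical stand-in for the descriptor and buffer handling the real function performs:

    /* Sketch only: feed completed RX frames to GRO through the vector's
     * NAPI context and accumulate per-vector statistics, as the listed
     * fm10k lines do.  Descriptor parsing is hidden behind a stand-in.
     */
    static int sketch_clean_rx_irq(struct fm10k_q_vector *q_vector,
                                   struct fm10k_ring *rx_ring, int budget)
    {
        unsigned int total_bytes = 0, total_packets = 0;

        while (likely(total_packets < budget)) {
            /* hypothetical helper standing in for fm10k_fetch_rx_buffer() */
            struct sk_buff *skb = sketch_fetch_rx_buffer(rx_ring);

            if (!skb)
                break;

            /* record the length before the skb is handed off */
            total_bytes += skb->len;
            total_packets++;

            /* same hand-off as fm10k_receive_skb() */
            napi_gro_receive(&q_vector->napi, skb);
        }

        q_vector->rx.total_packets += total_packets;
        q_vector->rx.total_bytes += total_bytes;

        return total_packets;
    }
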
1115 struct fm10k_intfc *interface = ring->q_vector->interface; in fm10k_get_tx_pending()
1179 static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, in fm10k_clean_tx_irq() argument
1182 struct fm10k_intfc *interface = q_vector->interface; in fm10k_clean_tx_irq()
1186 unsigned int budget = q_vector->tx.work_limit; in fm10k_clean_tx_irq()
1274 q_vector->tx.total_bytes += total_bytes; in fm10k_clean_tx_irq()
1275 q_vector->tx.total_packets += total_packets; in fm10k_clean_tx_irq()
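
fm10k_clean_tx_irq() budgets its work separately: it reclaims descriptors up to q_vector->tx.work_limit (initialised to FM10K_DEFAULT_TX_WORK further down) rather than the NAPI budget, folds the totals into the vector's TX statistics, and returns whether it finished within that limit. A rough sketch of that shape; sketch_tx_desc_done() and sketch_reclaim_one() are hypothetical stand-ins for the descriptor and unmap handling of the real function:

    /* Sketch only: reclaim completed TX work under the vector's own
     * work limit and report whether the limit was exhausted.
     */
    static bool sketch_clean_tx_irq(struct fm10k_q_vector *q_vector,
                                    struct fm10k_ring *tx_ring, int napi_budget)
    {
        unsigned int budget = q_vector->tx.work_limit;
        unsigned int total_bytes = 0, total_packets = 0;

        do {
            /* hypothetical check for a completed descriptor */
            if (!sketch_tx_desc_done(tx_ring))
                break;

            /* hypothetical reclaim of one frame; returns its byte count */
            total_bytes += sketch_reclaim_one(tx_ring, napi_budget);
            total_packets++;
        } while (likely(--budget));

        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;

        /* false means the limit ran out and polling should continue */
        return !!budget;
    }
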
1400 static void fm10k_qv_enable(struct fm10k_q_vector *q_vector) in fm10k_qv_enable() argument
1406 fm10k_update_itr(&q_vector->tx); in fm10k_qv_enable()
1409 fm10k_update_itr(&q_vector->rx); in fm10k_qv_enable()
1412 itr |= (q_vector->tx.itr & FM10K_ITR_MAX); in fm10k_qv_enable()
1415 itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT; in fm10k_qv_enable()
1418 writel(itr, q_vector->itr); in fm10k_qv_enable()
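
fm10k_qv_enable() re-arms the vector: it recomputes the adaptive ITR for both the TX and RX rings, packs the two intervals into one register value (TX in the low field, RX shifted by FM10K_ITR_INTERVAL1_SHIFT), and writes it to the vector's ITR register. A condensed sketch; the initial value of itr is assumed here to be an FM10K_ITR_ENABLE bit, which the listing does not show:

    /* Sketch only: combine the per-ring ITR values and re-arm the vector. */
    static void sketch_qv_enable(struct fm10k_q_vector *q_vector)
    {
        u32 itr = FM10K_ITR_ENABLE;         /* assumed enable bit, not shown above */

        fm10k_update_itr(&q_vector->tx);    /* refresh adaptive TX interval */
        fm10k_update_itr(&q_vector->rx);    /* refresh adaptive RX interval */

        itr |= (q_vector->tx.itr & FM10K_ITR_MAX);
        itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT;

        writel(itr, q_vector->itr);         /* MMIO write re-enables the vector */
    }
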
1423 struct fm10k_q_vector *q_vector = in fm10k_poll() local
1429 fm10k_for_each_ring(ring, q_vector->tx) { in fm10k_poll()
1430 if (!fm10k_clean_tx_irq(q_vector, ring, budget)) in fm10k_poll()
1441 if (q_vector->rx.count > 1) in fm10k_poll()
1442 per_ring_budget = max(budget / q_vector->rx.count, 1); in fm10k_poll()
1446 fm10k_for_each_ring(ring, q_vector->rx) { in fm10k_poll()
1447 int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget); in fm10k_poll()
1462 fm10k_qv_enable(q_vector); in fm10k_poll()
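
fm10k_poll() ties the pieces together: TX rings are cleaned first against the full budget, the remaining budget is split evenly across the vector's RX rings (never below 1 per ring), and the vector is only re-enabled once all work fits inside the budget. A simplified sketch of that flow; the clean_complete bookkeeping and the napi_complete_done() exit are standard NAPI idiom assumed here rather than taken from the listing:

    /* Sketch only: NAPI poll splitting its budget across a vector's rings. */
    static int sketch_poll(struct napi_struct *napi, int budget)
    {
        struct fm10k_q_vector *q_vector =
            container_of(napi, struct fm10k_q_vector, napi);
        struct fm10k_ring *ring;
        int per_ring_budget, work_done = 0;
        bool clean_complete = true;

        fm10k_for_each_ring(ring, q_vector->tx) {
            if (!fm10k_clean_tx_irq(q_vector, ring, budget))
                clean_complete = false;
        }

        /* netpoll may call with a zero budget; skip RX entirely then */
        if (budget <= 0)
            return budget;

        /* share the budget across all RX rings attached to this vector */
        if (q_vector->rx.count > 1)
            per_ring_budget = max(budget / q_vector->rx.count, 1);
        else
            per_ring_budget = budget;

        fm10k_for_each_ring(ring, q_vector->rx) {
            int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);

            work_done += work;
            if (work >= per_ring_budget)
                clean_complete = false;
        }

        /* still work pending on some ring: stay in polling mode */
        if (!clean_complete)
            return budget;

        /* all rings idle: leave polling mode and re-arm the interrupt */
        if (likely(napi_complete_done(napi, work_done)))
            fm10k_qv_enable(q_vector);

        return min(work_done, budget - 1);
    }
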
1594 struct fm10k_q_vector *q_vector; in fm10k_alloc_q_vector() local
1601 q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL); in fm10k_alloc_q_vector()
1602 if (!q_vector) in fm10k_alloc_q_vector()
1606 netif_napi_add(interface->netdev, &q_vector->napi, fm10k_poll); in fm10k_alloc_q_vector()
1609 interface->q_vector[v_idx] = q_vector; in fm10k_alloc_q_vector()
1610 q_vector->interface = interface; in fm10k_alloc_q_vector()
1611 q_vector->v_idx = v_idx; in fm10k_alloc_q_vector()
1614 ring = q_vector->ring; in fm10k_alloc_q_vector()
1617 q_vector->tx.ring = ring; in fm10k_alloc_q_vector()
1618 q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK; in fm10k_alloc_q_vector()
1619 q_vector->tx.itr = interface->tx_itr; in fm10k_alloc_q_vector()
1620 q_vector->tx.itr_scale = interface->hw.mac.itr_scale; in fm10k_alloc_q_vector()
1621 q_vector->tx.count = txr_count; in fm10k_alloc_q_vector()
1629 ring->q_vector = q_vector; in fm10k_alloc_q_vector()
1647 q_vector->rx.ring = ring; in fm10k_alloc_q_vector()
1648 q_vector->rx.itr = interface->rx_itr; in fm10k_alloc_q_vector()
1649 q_vector->rx.itr_scale = interface->hw.mac.itr_scale; in fm10k_alloc_q_vector()
1650 q_vector->rx.count = rxr_count; in fm10k_alloc_q_vector()
1659 ring->q_vector = q_vector; in fm10k_alloc_q_vector()
1676 fm10k_dbg_q_vector_init(q_vector); in fm10k_alloc_q_vector()
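
fm10k_alloc_q_vector() builds the vector in one pass: a single kzalloc(struct_size(...)) covers the vector plus its trailing array of rings, the NAPI handler is registered, and the TX rings are laid out in the embedded array ahead of the RX rings, each with a back-pointer to the vector. A trimmed sketch of that layout; error handling, ring naming, and the per-ring count/queue-index setup of the real function are omitted:

    /* Sketch only: allocate one q_vector with txr_count TX rings followed
     * by rxr_count RX rings in its flexible array, and wire them up.
     */
    static int sketch_alloc_q_vector(struct fm10k_intfc *interface,
                                     unsigned int v_idx,
                                     unsigned int txr_count,
                                     unsigned int rxr_count)
    {
        unsigned int ring_count = txr_count + rxr_count;
        struct fm10k_q_vector *q_vector;
        struct fm10k_ring *ring;

        /* one allocation for the vector and all of its rings */
        q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL);
        if (!q_vector)
            return -ENOMEM;

        netif_napi_add(interface->netdev, &q_vector->napi, fm10k_poll);

        interface->q_vector[v_idx] = q_vector;
        q_vector->interface = interface;
        q_vector->v_idx = v_idx;

        ring = q_vector->ring;

        /* TX rings occupy the front of the embedded array */
        q_vector->tx.ring = ring;
        q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK;
        q_vector->tx.itr = interface->tx_itr;
        q_vector->tx.itr_scale = interface->hw.mac.itr_scale;
        q_vector->tx.count = txr_count;

        while (txr_count--) {
            ring->q_vector = q_vector;   /* back-pointer used on the hot path */
            ring++;
        }

        /* RX rings follow directly after the TX rings */
        q_vector->rx.ring = ring;
        q_vector->rx.itr = interface->rx_itr;
        q_vector->rx.itr_scale = interface->hw.mac.itr_scale;
        q_vector->rx.count = rxr_count;

        while (rxr_count--) {
            ring->q_vector = q_vector;
            ring++;
        }

        fm10k_dbg_q_vector_init(q_vector);

        return 0;
    }
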
1692 struct fm10k_q_vector *q_vector = interface->q_vector[v_idx]; in fm10k_free_q_vector() local
1695 fm10k_dbg_q_vector_exit(q_vector); in fm10k_free_q_vector()
1697 fm10k_for_each_ring(ring, q_vector->tx) in fm10k_free_q_vector()
1700 fm10k_for_each_ring(ring, q_vector->rx) in fm10k_free_q_vector()
1703 interface->q_vector[v_idx] = NULL; in fm10k_free_q_vector()
1704 netif_napi_del(&q_vector->napi); in fm10k_free_q_vector()
1705 kfree_rcu(q_vector, rcu); in fm10k_free_q_vector()
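
Teardown in fm10k_free_q_vector() mirrors allocation: walk the vector's TX and RX rings to detach them, clear the interface's vector slot, unregister NAPI, and free the vector with kfree_rcu() so any reader still traversing it under RCU finishes first. A compact sketch; exactly what the real per-ring loops do (they clear the interface's ring tables) is assumed rather than shown in the listing:

    /* Sketch only: tear down one q_vector and defer its free past RCU. */
    static void sketch_free_q_vector(struct fm10k_intfc *interface, int v_idx)
    {
        struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];
        struct fm10k_ring *ring;

        fm10k_dbg_q_vector_exit(q_vector);

        /* detach every ring that pointed back at this vector
         * (assumption: the real loops clear the interface ring tables)
         */
        fm10k_for_each_ring(ring, q_vector->tx)
            ring->q_vector = NULL;
        fm10k_for_each_ring(ring, q_vector->rx)
            ring->q_vector = NULL;

        interface->q_vector[v_idx] = NULL;
        netif_napi_del(&q_vector->napi);
        kfree_rcu(q_vector, rcu);   /* wait out concurrent RCU readers */
    }
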