Lines Matching full:ring

55 struct mlx4_en_tx_ring *ring; in mlx4_en_create_tx_ring() local
59 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node); in mlx4_en_create_tx_ring()
60 if (!ring) { in mlx4_en_create_tx_ring()
61 en_err(priv, "Failed allocating TX ring\n"); in mlx4_en_create_tx_ring()
65 ring->size = size; in mlx4_en_create_tx_ring()
66 ring->size_mask = size - 1; in mlx4_en_create_tx_ring()
67 ring->sp_stride = stride; in mlx4_en_create_tx_ring()
68 ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS; in mlx4_en_create_tx_ring()
71 ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node); in mlx4_en_create_tx_ring()
72 if (!ring->tx_info) { in mlx4_en_create_tx_ring()
77 en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", in mlx4_en_create_tx_ring()
78 ring->tx_info, tmp); in mlx4_en_create_tx_ring()
80 ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node); in mlx4_en_create_tx_ring()
81 if (!ring->bounce_buf) { in mlx4_en_create_tx_ring()
82 ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL); in mlx4_en_create_tx_ring()
83 if (!ring->bounce_buf) { in mlx4_en_create_tx_ring()
88 ring->buf_size = ALIGN(size * ring->sp_stride, MLX4_EN_PAGE_SIZE); in mlx4_en_create_tx_ring()
92 err = mlx4_alloc_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size); in mlx4_en_create_tx_ring()
99 ring->buf = ring->sp_wqres.buf.direct.buf; in mlx4_en_create_tx_ring()
101 en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n", in mlx4_en_create_tx_ring()
102 ring, ring->buf, ring->size, ring->buf_size, in mlx4_en_create_tx_ring()
103 (unsigned long long) ring->sp_wqres.buf.direct.map); in mlx4_en_create_tx_ring()
105 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn, in mlx4_en_create_tx_ring()
109 en_err(priv, "failed reserving qp for TX ring\n"); in mlx4_en_create_tx_ring()
113 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp); in mlx4_en_create_tx_ring()
115 en_err(priv, "Failed allocating qp %d\n", ring->qpn); in mlx4_en_create_tx_ring()
118 ring->sp_qp.event = mlx4_en_sqp_event; in mlx4_en_create_tx_ring()
120 err = mlx4_bf_alloc(mdev->dev, &ring->bf, node); in mlx4_en_create_tx_ring()
123 ring->bf.uar = &mdev->priv_uar; in mlx4_en_create_tx_ring()
124 ring->bf.uar->map = mdev->uar_map; in mlx4_en_create_tx_ring()
125 ring->bf_enabled = false; in mlx4_en_create_tx_ring()
126 ring->bf_alloced = false; in mlx4_en_create_tx_ring()
129 ring->bf_alloced = true; in mlx4_en_create_tx_ring()
130 ring->bf_enabled = !!(priv->pflags & in mlx4_en_create_tx_ring()
134 ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; in mlx4_en_create_tx_ring()
135 ring->queue_index = queue_index; in mlx4_en_create_tx_ring()
140 &ring->sp_affinity_mask); in mlx4_en_create_tx_ring()
142 *pring = ring; in mlx4_en_create_tx_ring()
146 mlx4_qp_release_range(mdev->dev, ring->qpn, 1); in mlx4_en_create_tx_ring()
148 mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size); in mlx4_en_create_tx_ring()
150 kfree(ring->bounce_buf); in mlx4_en_create_tx_ring()
151 ring->bounce_buf = NULL; in mlx4_en_create_tx_ring()
153 kvfree(ring->tx_info); in mlx4_en_create_tx_ring()
154 ring->tx_info = NULL; in mlx4_en_create_tx_ring()
156 kfree(ring); in mlx4_en_create_tx_ring()
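
The matches above cover mlx4_en_create_tx_ring(): the ring size is a power of two so that size - 1 doubles as an index mask (line 66), and full_size (line 68) reserves headroom plus one maximal descriptor so the queue can be stopped before a worst-case send would overflow; the error path (lines 146-156) unwinds in reverse allocation order. A minimal standalone sketch of the sizing arithmetic, with HEADROOM and MAX_DESC_TXBBS as illustrative stand-ins rather than the driver's real values:

/* Sketch of the sizing math in mlx4_en_create_tx_ring(); the two
 * constants are assumptions for illustration only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HEADROOM       4   /* assumed */
#define MAX_DESC_TXBBS 16  /* assumed */

int main(void)
{
	uint32_t size = 1024;

	assert((size & (size - 1)) == 0);  /* must be a power of two */

	uint32_t size_mask = size - 1;                          /* line 66 */
	uint32_t full_size = size - HEADROOM - MAX_DESC_TXBBS;  /* line 68 */

	/* a free-running producer index maps to a slot with one AND: */
	uint32_t prod = 0x80000003u;

	printf("slot %u of %u, stop above %u in flight\n",
	       (unsigned)(prod & size_mask), (unsigned)size,
	       (unsigned)full_size);
	return 0;
}
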
165 struct mlx4_en_tx_ring *ring = *pring; in mlx4_en_destroy_tx_ring() local
166 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); in mlx4_en_destroy_tx_ring()
168 if (ring->bf_alloced) in mlx4_en_destroy_tx_ring()
169 mlx4_bf_free(mdev->dev, &ring->bf); in mlx4_en_destroy_tx_ring()
170 mlx4_qp_remove(mdev->dev, &ring->sp_qp); in mlx4_en_destroy_tx_ring()
171 mlx4_qp_free(mdev->dev, &ring->sp_qp); in mlx4_en_destroy_tx_ring()
172 mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); in mlx4_en_destroy_tx_ring()
173 mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size); in mlx4_en_destroy_tx_ring()
174 kfree(ring->bounce_buf); in mlx4_en_destroy_tx_ring()
175 ring->bounce_buf = NULL; in mlx4_en_destroy_tx_ring()
176 kvfree(ring->tx_info); in mlx4_en_destroy_tx_ring()
177 ring->tx_info = NULL; in mlx4_en_destroy_tx_ring()
178 kfree(ring); in mlx4_en_destroy_tx_ring()
183 struct mlx4_en_tx_ring *ring, in mlx4_en_activate_tx_ring() argument
189 ring->sp_cqn = cq; in mlx4_en_activate_tx_ring()
190 ring->prod = 0; in mlx4_en_activate_tx_ring()
191 ring->cons = 0xffffffff; in mlx4_en_activate_tx_ring()
192 ring->last_nr_txbb = 1; in mlx4_en_activate_tx_ring()
193 memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); in mlx4_en_activate_tx_ring()
194 memset(ring->buf, 0, ring->buf_size); in mlx4_en_activate_tx_ring()
195 ring->free_tx_desc = mlx4_en_free_tx_desc; in mlx4_en_activate_tx_ring()
197 ring->sp_qp_state = MLX4_QP_STATE_RST; in mlx4_en_activate_tx_ring()
198 ring->doorbell_qpn = cpu_to_be32(ring->sp_qp.qpn << 8); in mlx4_en_activate_tx_ring()
199 ring->mr_key = cpu_to_be32(mdev->mr.key); in mlx4_en_activate_tx_ring()
201 mlx4_en_fill_qp_context(priv, ring->size, ring->sp_stride, 1, 0, ring->qpn, in mlx4_en_activate_tx_ring()
202 ring->sp_cqn, user_prio, &ring->sp_context); in mlx4_en_activate_tx_ring()
203 if (ring->bf_alloced) in mlx4_en_activate_tx_ring()
204 ring->sp_context.usr_page = in mlx4_en_activate_tx_ring()
206 ring->bf.uar->index)); in mlx4_en_activate_tx_ring()
208 err = mlx4_qp_to_ready(mdev->dev, &ring->sp_wqres.mtt, &ring->sp_context, in mlx4_en_activate_tx_ring()
209 &ring->sp_qp, &ring->sp_qp_state); in mlx4_en_activate_tx_ring()
210 if (!cpumask_empty(&ring->sp_affinity_mask)) in mlx4_en_activate_tx_ring()
211 netif_set_xps_queue(priv->dev, &ring->sp_affinity_mask, in mlx4_en_activate_tx_ring()
212 ring->queue_index); in mlx4_en_activate_tx_ring()
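
mlx4_en_activate_tx_ring() resets the counters (cons starts at 0xffffffff so the first free step over last_nr_txbb = 1 lands on slot 0) and precomputes the big-endian doorbell word once (line 198), keeping byte-swapping off the per-packet path. A userspace model of that precomputation, using POSIX htonl() as a stand-in for cpu_to_be32() and a made-up QP number:

/* Model of line 198's precomputed doorbell word. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t qpn = 0xa5;                  /* hypothetical QP number */
	uint32_t doorbell = htonl(qpn << 8);  /* QPN occupies bits 8..31 */

	/* the hot path can write this word as-is, no per-packet swap */
	printf("doorbell word: 0x%08x\n", (unsigned)doorbell);
	return 0;
}
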
218 struct mlx4_en_tx_ring *ring) in mlx4_en_deactivate_tx_ring() argument
222 mlx4_qp_modify(mdev->dev, NULL, ring->sp_qp_state, in mlx4_en_deactivate_tx_ring()
223 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->sp_qp); in mlx4_en_deactivate_tx_ring()
226 static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring) in mlx4_en_is_tx_ring_full() argument
228 return ring->prod - ring->cons > ring->full_size; in mlx4_en_is_tx_ring_full()
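
mlx4_en_is_tx_ring_full() relies on prod and cons being free-running u32 counters: unsigned subtraction yields the true in-flight count even after either counter wraps past UINT32_MAX. A standalone illustration with made-up values:

/* Why "prod - cons > full_size" survives wraparound. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cons = 0xfffffff0u;  /* has not wrapped yet */
	uint32_t prod = 0x00000010u;  /* already wrapped past 2^32 */
	uint32_t full_size = 1004;    /* illustrative threshold */

	/* prod < cons numerically, yet the occupancy is correct: 32 */
	printf("in flight: %u, full: %s\n", (unsigned)(prod - cons),
	       (prod - cons > full_size) ? "yes" : "no");
	return 0;
}
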
232 struct mlx4_en_tx_ring *ring, int index, in mlx4_en_stamp_wqe() argument
236 struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE); in mlx4_en_stamp_wqe()
237 struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; in mlx4_en_stamp_wqe()
238 void *end = ring->buf + ring->buf_size; in mlx4_en_stamp_wqe()
258 ptr = ring->buf; in mlx4_en_stamp_wqe()
266 struct mlx4_en_tx_ring *ring,
271 struct mlx4_en_tx_ring *ring, in mlx4_en_free_tx_desc() argument
275 struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; in mlx4_en_free_tx_desc()
276 struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE); in mlx4_en_free_tx_desc()
278 void *end = ring->buf + ring->buf_size; in mlx4_en_free_tx_desc()
318 data = ring->buf + ((void *)data - end); in mlx4_en_free_tx_desc()
324 data = ring->buf; in mlx4_en_free_tx_desc()
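
Inside mlx4_en_free_tx_desc(), the walk over the descriptor's data segments can step past the end of the ring buffer; lines 318 and 324 fold such a pointer back to the start by its overshoot. A minimal model, with char * standing in for the driver's GCC void * arithmetic:

/* Model of the wrap handled at lines 318/324. */
#include <stdio.h>

static char *wrap(char *buf, char *end, char *data)
{
	if (data >= end)
		data = buf + (data - end);  /* fold overshoot back */
	return data;
}

int main(void)
{
	char ringbuf[64];
	char *end = ringbuf + sizeof(ringbuf);
	char *p = wrap(ringbuf, end, ringbuf + 70);  /* 6 bytes past end */

	printf("wrapped to offset %ld\n", (long)(p - ringbuf));  /* 6 */
	return 0;
}
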
338 struct mlx4_en_tx_ring *ring,
343 struct mlx4_en_tx_ring *ring, in mlx4_en_recycle_tx_desc() argument
347 struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; in mlx4_en_recycle_tx_desc()
353 if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) { in mlx4_en_recycle_tx_desc()
362 int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) in mlx4_en_free_tx_buf() argument
368 ring->cons += ring->last_nr_txbb; in mlx4_en_free_tx_buf()
370 ring->cons, ring->prod); in mlx4_en_free_tx_buf()
372 if ((u32) (ring->prod - ring->cons) > ring->size) { in mlx4_en_free_tx_buf()
378 while (ring->cons != ring->prod) { in mlx4_en_free_tx_buf()
379 ring->last_nr_txbb = ring->free_tx_desc(priv, ring, in mlx4_en_free_tx_buf()
380 ring->cons & ring->size_mask, in mlx4_en_free_tx_buf()
382 ring->cons += ring->last_nr_txbb; in mlx4_en_free_tx_buf()
386 if (ring->tx_queue) in mlx4_en_free_tx_buf()
387 netdev_tx_reset_queue(ring->tx_queue); in mlx4_en_free_tx_buf()
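
mlx4_en_free_tx_buf() drains everything still in flight: cons advances by each freed descriptor's TXBB count until it meets prod. A standalone model of that loop, with the free callback stubbed to one TXBB per descriptor:

/* Arithmetic model of the drain loop (lines 368-382). */
#include <stdint.h>
#include <stdio.h>

struct ring { uint32_t prod, cons, size_mask, last_nr_txbb; };

static uint32_t free_desc(struct ring *r, uint32_t index)
{
	(void)r; (void)index;
	return 1;  /* stub: a real descriptor spans >= 1 TXBBs */
}

int main(void)
{
	struct ring r = { .prod = 5, .cons = 0xffffffffu,
			  .size_mask = 1023, .last_nr_txbb = 1 };

	/* step over the already-consumed slot first, as line 368 does */
	r.cons += r.last_nr_txbb;

	while (r.cons != r.prod) {
		r.last_nr_txbb = free_desc(&r, r.cons & r.size_mask);
		r.cons += r.last_nr_txbb;
	}
	printf("drained: cons=%u prod=%u\n", (unsigned)r.cons,
	       (unsigned)r.prod);
	return 0;
}
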
396 u16 cqe_index, struct mlx4_en_tx_ring *ring) in mlx4_en_handle_err_cqe() argument
405 ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome); in mlx4_en_handle_err_cqe()
409 wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask; in mlx4_en_handle_err_cqe()
410 tx_info = &ring->tx_info[wqe_index]; in mlx4_en_handle_err_cqe()
412 en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn, in mlx4_en_handle_err_cqe()
414 tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE); in mlx4_en_handle_err_cqe()
429 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring]; in mlx4_en_process_tx_cq() local
436 u32 size_mask = ring->size_mask; in mlx4_en_process_tx_cq()
449 netdev_txq_bql_complete_prefetchw(ring->tx_queue); in mlx4_en_process_tx_cq()
453 last_nr_txbb = READ_ONCE(ring->last_nr_txbb); in mlx4_en_process_tx_cq()
454 ring_cons = READ_ONCE(ring->cons); in mlx4_en_process_tx_cq()
471 if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state)) in mlx4_en_process_tx_cq()
473 ring); in mlx4_en_process_tx_cq()
484 if (unlikely(ring->tx_info[ring_index].ts_requested)) in mlx4_en_process_tx_cq()
488 last_nr_txbb = INDIRECT_CALL_2(ring->free_tx_desc, in mlx4_en_process_tx_cq()
491 priv, ring, ring_index, in mlx4_en_process_tx_cq()
494 mlx4_en_stamp_wqe(priv, ring, stamp_index, in mlx4_en_process_tx_cq()
496 ring->size)); in mlx4_en_process_tx_cq()
500 bytes += ring->tx_info[ring_index].nr_bytes; in mlx4_en_process_tx_cq()
510 * the ring consumer. in mlx4_en_process_tx_cq()
517 WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb); in mlx4_en_process_tx_cq()
518 WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped); in mlx4_en_process_tx_cq()
523 netdev_tx_completed_queue(ring->tx_queue, packets, bytes); in mlx4_en_process_tx_cq()
525 /* Wakeup Tx queue if this stopped, and ring is not full. in mlx4_en_process_tx_cq()
527 if (netif_tx_queue_stopped(ring->tx_queue) && in mlx4_en_process_tx_cq()
528 !mlx4_en_is_tx_ring_full(ring)) { in mlx4_en_process_tx_cq()
529 netif_tx_wake_queue(ring->tx_queue); in mlx4_en_process_tx_cq()
530 ring->wake_queue++; in mlx4_en_process_tx_cq()
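
mlx4_en_process_tx_cq() accumulates freed TXBBs locally and publishes ring->cons once at the end (the WRITE_ONCE on line 518), so the producer sees a single coherent update instead of a store per descriptor; it then wakes the queue if the ring has drained below full. A simplified model of that read-once/write-once batching, with C11 atomics standing in for READ_ONCE/WRITE_ONCE and the per-descriptor work stubbed:

/* Snapshot cons once, accumulate locally, publish once. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ring { _Atomic uint32_t cons; };

static void process_completions(struct ring *r, uint32_t ndesc)
{
	uint32_t cons = atomic_load_explicit(&r->cons,
					     memory_order_relaxed);
	uint32_t skipped = 0;

	for (uint32_t i = 0; i < ndesc; i++)
		skipped += 1;  /* stub for free_tx_desc()'s TXBB count */

	/* single publish, like WRITE_ONCE(ring->cons, ...) on line 518 */
	atomic_store_explicit(&r->cons, cons + skipped,
			      memory_order_relaxed);
}

int main(void)
{
	struct ring r;

	atomic_init(&r.cons, 0);
	process_completions(&r, 8);
	printf("cons=%u\n", (unsigned)atomic_load(&r.cons));
	return 0;
}
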
566 struct mlx4_en_tx_ring *ring, in mlx4_en_bounce_to_desc() argument
570 u32 copy = (ring->size - index) << LOG_TXBB_SIZE; in mlx4_en_bounce_to_desc()
577 *((u32 *) (ring->buf + i)) = in mlx4_en_bounce_to_desc()
578 *((u32 *) (ring->bounce_buf + copy + i)); in mlx4_en_bounce_to_desc()
585 *((u32 *)(ring->buf + (index << LOG_TXBB_SIZE) + i)) = in mlx4_en_bounce_to_desc()
586 *((u32 *) (ring->bounce_buf + i)); in mlx4_en_bounce_to_desc()
590 return ring->buf + (index << LOG_TXBB_SIZE); in mlx4_en_bounce_to_desc()
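
mlx4_en_bounce_to_desc() copies a descriptor that was staged linearly in bounce_buf into the ring, splitting it at the wrap point. The real loops run in descending order with a wmb() at each TXBB boundary so the TXBB carrying the ownership bit is written last; the sketch below shows only the split-address arithmetic:

/* Split-copy arithmetic of the bounce path, ordering omitted. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LOG_TXBB_SIZE 6  /* 64-byte TXBBs, as in the driver */

static void bounce_to_ring(uint8_t *ring_buf, uint32_t ring_size,
			   uint32_t index, const uint8_t *bounce,
			   uint32_t desc_size)
{
	uint32_t tail = (ring_size - index) << LOG_TXBB_SIZE;

	if (desc_size <= tail) {  /* no wrap: one copy */
		memcpy(ring_buf + (index << LOG_TXBB_SIZE), bounce,
		       desc_size);
		return;
	}
	/* wrap: tail bytes at the end, the remainder at slot 0 */
	memcpy(ring_buf + (index << LOG_TXBB_SIZE), bounce, tail);
	memcpy(ring_buf, bounce + tail, desc_size - tail);
}

int main(void)
{
	uint8_t ringbuf[8 << LOG_TXBB_SIZE] = {0};  /* 8-TXBB ring */
	uint8_t bounce[3 << LOG_TXBB_SIZE];

	memset(bounce, 0xab, sizeof(bounce));
	/* a 3-TXBB descriptor at slot 6 of 8 wraps by one TXBB */
	bounce_to_ring(ringbuf, 8, 6, bounce, sizeof(bounce));
	printf("first byte after wrap: 0x%02x\n", ringbuf[0]);
	return 0;
}
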
743 void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring) in mlx4_en_xmit_doorbell() argument
756 (__force u32)ring->doorbell_qpn, in mlx4_en_xmit_doorbell()
757 ring->bf.uar->map + MLX4_SEND_DOORBELL); in mlx4_en_xmit_doorbell()
760 static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring, in mlx4_en_tx_write_desc() argument
779 mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl, in mlx4_en_tx_write_desc()
784 ring->bf.offset ^= ring->bf.buf_size; in mlx4_en_tx_write_desc()
792 mlx4_en_xmit_doorbell(ring); in mlx4_en_tx_write_desc()
794 ring->xmit_more++; in mlx4_en_tx_write_desc()
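
mlx4_en_tx_write_desc() either rings the regular doorbell or, when BlueFlame is enabled, copies the descriptor straight into the device's BlueFlame register; the XOR on line 784 ping-pongs between two slots so a new write never lands on one the NIC may still be reading. A toy model of the toggle, with buf_size as an assumed per-slot size:

/* Double-buffer toggle from line 784. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t buf_size = 256;  /* hypothetical BlueFlame slot size */
	uint32_t offset = 0;

	for (int i = 0; i < 4; i++) {
		printf("descriptor %d written at offset %u\n", i,
		       (unsigned)offset);
		offset ^= buf_size;  /* alternates 0, 256, 0, 256 */
	}
	return 0;
}
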
867 struct mlx4_en_tx_ring *ring; in mlx4_en_xmit() local
888 ring = priv->tx_ring[TX][tx_ind]; in mlx4_en_xmit()
893 /* fetch ring->cons far ahead before needing it to avoid stall */ in mlx4_en_xmit()
894 ring_cons = READ_ONCE(ring->cons); in mlx4_en_xmit()
910 bf_ok = ring->bf_enabled; in mlx4_en_xmit()
925 netdev_txq_bql_enqueue_prefetchw(ring->tx_queue); in mlx4_en_xmit()
929 (u32)(ring->prod - ring_cons - 1)); in mlx4_en_xmit()
932 index = ring->prod & ring->size_mask; in mlx4_en_xmit()
933 bf_index = ring->prod; in mlx4_en_xmit()
937 if (likely(index + nr_txbb <= ring->size)) in mlx4_en_xmit()
938 tx_desc = ring->buf + (index << LOG_TXBB_SIZE); in mlx4_en_xmit()
940 tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; in mlx4_en_xmit()
945 /* Save skb in tx_info ring */ in mlx4_en_xmit()
946 tx_info = &ring->tx_info[index]; in mlx4_en_xmit()
972 lso_header_size, ring->mr_key, in mlx4_en_xmit()
981 if (unlikely(ring->hwtstamp_tx_type == HWTSTAMP_TX_ON && in mlx4_en_xmit()
996 ring->tx_csum++; in mlx4_en_xmit()
1016 ((ring->prod & ring->size) ? in mlx4_en_xmit()
1027 ring->tso_packets++; in mlx4_en_xmit()
1031 ring->packets += i; in mlx4_en_xmit()
1035 ((ring->prod & ring->size) ? in mlx4_en_xmit()
1038 ring->packets++; in mlx4_en_xmit()
1040 ring->bytes += tx_info->nr_bytes; in mlx4_en_xmit()
1064 ring->prod += nr_txbb; in mlx4_en_xmit()
1068 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); in mlx4_en_xmit()
1073 stop_queue = mlx4_en_is_tx_ring_full(ring); in mlx4_en_xmit()
1075 netif_tx_stop_queue(ring->tx_queue); in mlx4_en_xmit()
1076 ring->queue_stopped++; in mlx4_en_xmit()
1079 send_doorbell = __netdev_tx_sent_queue(ring->tx_queue, in mlx4_en_xmit()
1088 qpn_vlan.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size); in mlx4_en_xmit()
1092 mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, desc_size, bf_index, in mlx4_en_xmit()
1099 * Need a memory barrier to make sure ring->cons was not in mlx4_en_xmit()
1104 if (unlikely(!mlx4_en_is_tx_ring_full(ring))) { in mlx4_en_xmit()
1105 netif_tx_wake_queue(ring->tx_queue); in mlx4_en_xmit()
1106 ring->wake_queue++; in mlx4_en_xmit()
1112 ring->tx_dropped++; in mlx4_en_xmit()
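
In mlx4_en_xmit(), the producer stops the queue when it sees the ring full (lines 1073-1076), then re-checks after a memory barrier (lines 1099-1106): a completion may have freed space between the check and the stop, in which case the producer wakes itself rather than strand the queue. A standalone, single-threaded model of that stop/re-check pattern, with a bool standing in for netif_tx_stop_queue()/netif_tx_wake_queue() and the barrier only described in a comment:

/* Stop/re-check pattern from the xmit slow path. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring { uint32_t prod, cons, full_size; bool stopped; };

static bool ring_full(const struct ring *r)
{
	return r->prod - r->cons > r->full_size;
}

static void try_stop(struct ring *r)
{
	if (!ring_full(r))
		return;
	r->stopped = true;  /* netif_tx_stop_queue() */

	/* the driver inserts a memory barrier here before re-reading
	 * cons, so a completion racing with the stop is caught below */
	if (!ring_full(r))
		r->stopped = false;  /* netif_tx_wake_queue() */
}

int main(void)
{
	struct ring r = { .prod = 100, .cons = 0, .full_size = 64 };

	try_stop(&r);  /* genuinely full: the queue stays stopped */
	printf("stopped=%d\n", r.stopped);
	return 0;
}
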
1123 struct mlx4_en_tx_ring *ring) in mlx4_en_init_tx_xdp_ring_descs() argument
1127 for (i = 0; i < ring->size; i++) { in mlx4_en_init_tx_xdp_ring_descs()
1128 struct mlx4_en_tx_info *tx_info = &ring->tx_info[i]; in mlx4_en_init_tx_xdp_ring_descs()
1129 struct mlx4_en_tx_desc *tx_desc = ring->buf + in mlx4_en_init_tx_xdp_ring_descs()
1140 tx_desc->data.lkey = ring->mr_key; in mlx4_en_init_tx_xdp_ring_descs()
1154 struct mlx4_en_tx_ring *ring; in mlx4_en_xmit_frame() local
1162 ring = priv->tx_ring[TX_XDP][tx_ind]; in mlx4_en_xmit_frame()
1164 if (unlikely(mlx4_en_is_tx_ring_full(ring))) in mlx4_en_xmit_frame()
1167 index = ring->prod & ring->size_mask; in mlx4_en_xmit_frame()
1168 tx_info = &ring->tx_info[index]; in mlx4_en_xmit_frame()
1172 (u32)(ring->prod - READ_ONCE(ring->cons) - 1)); in mlx4_en_xmit_frame()
1174 tx_desc = ring->buf + (index << LOG_TXBB_SIZE); in mlx4_en_xmit_frame()
1194 ((ring->prod & ring->size) ? in mlx4_en_xmit_frame()
1200 ring->prod += MLX4_EN_XDP_TX_NRTXBB; in mlx4_en_xmit_frame()
1207 ring->xmit_more++; in mlx4_en_xmit_frame()
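
mlx4_en_xmit_frame() is the XDP transmit path: the descriptors were pre-built once in mlx4_en_init_tx_xdp_ring_descs(), so per frame the code only fills in the buffer address and advances prod by the constant MLX4_EN_XDP_TX_NRTXBB (line 1200). A sketch of that fixed-stride producer, assuming a stride of one TXBB:

/* Fixed-stride XDP producer; the stride is an assumption. */
#include <stdint.h>
#include <stdio.h>

#define XDP_TX_NRTXBB 1  /* assumed per-frame TXBB count */

int main(void)
{
	uint32_t prod = 0, size_mask = 1023;

	for (int i = 0; i < 3; i++) {
		uint32_t index = prod & size_mask;  /* slot, line 1167 */

		printf("frame %d -> slot %u\n", i, (unsigned)index);
		prod += XDP_TX_NRTXBB;  /* no per-frame descriptor sizing */
	}
	return 0;
}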