/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_eth_com.h"

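/*
 * Note on the completion-queue convention used throughout this file (an
 * explanatory summary inferred from the helpers below, not new behavior):
 * each completion descriptor carries a phase bit. The driver keeps its own
 * expected phase per queue and flips it every time head/tail wraps around
 * q_depth, so a descriptor is owned by the driver only while its phase bit
 * matches the expected one. This avoids a separate valid flag that would
 * have to be cleared after every pass over the ring.
 */
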
static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
	u32 offset = tail_masked * io_sq->desc_entry_size;

	/* In case this queue isn't an LLQ, there is nothing to copy */
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return;

	memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
		    io_sq->desc_addr.virt_addr + offset,
		    io_sq->desc_entry_size);
}

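/*
 * Background (a summary based on the placement-policy checks in this file):
 * with ENA_ADMIN_PLACEMENT_POLICY_HOST the device fetches descriptors from
 * host memory via DMA, so nothing needs to be mirrored. In LLQ ("push")
 * mode the descriptor and the packet header are written directly into a
 * device memory window with memcpy_toio(), trading extra PCIe writes by the
 * CPU for lower latency on small packets.
 */
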
static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;
}

static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
				       u8 *head_src, u16 header_len)
{
	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
	u8 __iomem *dev_head_addr =
		io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);

	/* Headers are pushed to the device only in LLQ mode */
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return 0;

	if (unlikely(!io_sq->header_addr)) {
		pr_err("Push buffer header ptr is NULL\n");
		return -EINVAL;
	}

	memcpy_toio(dev_head_addr, head_src, header_len);

	return 0;
}

static inline struct ena_eth_io_rx_cdesc_base *
ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
			idx * io_cq->cdesc_entry_size_in_bytes);
}

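/*
 * ena_com_cdesc_rx_pkt_get - a descriptive summary (added commentary; the
 * semantics below are read off the implementation): walk the completion ring
 * until a cdesc with the LAST bit is seen. If a full packet has completed,
 * return the total number of cdescs it spans (including any counted on
 * earlier partial calls) and report the index of its first cdesc through
 * first_cdesc_idx. If the packet is still in flight, remember the partial
 * count in the io_cq and return 0.
 */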
static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
					   u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		pr_debug("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			 io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	int rc;

	if (ena_tx_ctx->meta_valid) {
		rc = memcmp(&io_sq->cached_tx_meta,
			    &ena_tx_ctx->ena_meta,
			    sizeof(struct ena_com_tx_meta));

		if (unlikely(rc != 0))
			return true;
	}

	return false;
}

static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
							 struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	meta_desc = get_sq_desc(io_sq);
	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= (ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
	meta_desc->len_ctrl |= (io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	/* Cache the meta desc so unchanged metadata can be skipped next time */
	memcpy(&io_sq->cached_tx_meta, ena_meta,
	       sizeof(struct ena_com_tx_meta));

	ena_com_copy_curr_sq_desc_to_dev(io_sq);
	ena_com_sq_update_tail(io_sq);
}

static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
					struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash: %u frag: %d cdesc_status: %x\n",
		 ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
		 ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
		 ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

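/*
 * ena_com_prepare_tx - fill TX descriptors for one packet.
 *
 * Added commentary (summarizing the code below): pushes the packet header in
 * LLQ mode, emits a metadata descriptor when the TX metadata differs from the
 * cached copy, then emits one descriptor per buffer. On success, *nb_hw_desc
 * holds the number of hardware descriptors consumed, so the caller can
 * account for them at completion time.
 *
 * Illustrative caller sketch (hypothetical setup, not the actual upper-layer
 * driver code):
 *
 *	struct ena_com_tx_ctx ctx = { ... };
 *	int nb_hw_desc, rc;
 *
 *	rc = ena_com_prepare_tx(io_sq, &ctx, &nb_hw_desc);
 *	if (!rc)
 *		ena_com_write_sq_doorbell(io_sq);
 *
 * ena_com_write_sq_doorbell() (declared in ena_eth_com.h) notifies the
 * device that new descriptors are ready.
 */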
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *push_header = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	int total_desc, i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

	/* num_bufs + 1 for a potential meta descriptor */
	if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
		pr_err("Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		pr_err("header size is too large %d max header: %d\n",
		       header_len, io_sq->tx_max_header_size);
		return -EINVAL;
	}

	/* start with pushing the header (if needed) */
	rc = ena_com_write_header(io_sq, push_header, header_len);
	if (unlikely(rc))
		return rc;

	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
			ena_tx_ctx);
	if (have_meta)
		ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);

	/* If the caller doesn't want to send packets, the only descriptor
	 * possibly queued above is the meta descriptor
	 */
	if (unlikely(!num_bufs && !header_len)) {
		*nb_hw_desc = have_meta ? 1 : 0;
		return 0;
	}

	desc = get_sq_desc(io_sq);
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= (header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 of the req_id */
	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 of the req_id */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			ena_com_copy_curr_sq_desc_to_dev(io_sq);
			ena_com_sq_update_tail(io_sq);

			desc = get_sq_desc(io_sq);
			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= (io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	ena_com_copy_curr_sq_desc_to_dev(io_sq);

	ena_com_sq_update_tail(io_sq);

	total_desc = max_t(u16, num_bufs, 1);
	total_desc += have_meta ? 1 : 0;

	*nb_hw_desc = total_desc;
	return 0;
}

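/*
 * ena_com_rx_pkt - retrieve the next received packet from the completion
 * queue.
 *
 * Added commentary (paraphrasing the code below): collects all completion
 * descriptors belonging to one RX packet, copies length/req_id per buffer
 * into ena_rx_ctx->ena_bufs, advances the matching SQ's next_to_comp, and
 * extracts the offload flags from the last cdesc. ena_rx_ctx->descs is 0
 * when no complete packet is available yet.
 */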
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
		 nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
		       ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	for (i = 0; i < nb_hw_desc; i++) {
		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

		ena_buf->len = cdesc->length;
		ena_buf->req_id = cdesc->req_id;
		ena_buf++;
	}

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
		 io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}

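/*
 * ena_com_add_single_rx_desc - post one receive buffer to the RX submission
 * queue. Illustrative refill sketch (hypothetical names; the real refill
 * loop lives in the upper-layer driver):
 *
 *	struct ena_com_buf ena_buf = {
 *		.paddr = dma_addr,
 *		.len = buf_len,
 *	};
 *
 *	if (ena_com_add_single_rx_desc(io_sq, &ena_buf, req_id) == 0)
 *		ena_com_write_sq_doorbell(io_sq);
 */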
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
	desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	ena_com_sq_update_tail(io_sq);

	return 0;
}

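/*
 * ena_com_tx_comp_req_id_get - pop one TX completion and return its req_id.
 * Returns -EAGAIN while the device hasn't produced a new completion.
 *
 * Illustrative reaping loop (a sketch; handle_tx_done() is a hypothetical
 * caller helper, not part of this file):
 *
 *	u16 req_id;
 *
 *	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0)
 *		handle_tx_done(tx_ring, req_id);
 */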
int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected, it means that the device didn't update
	 * this completion yet.
	 */
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return -EAGAIN;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	/* Read req_id only once and validate that copy, so a concurrent
	 * device write can't slip an unchecked value to the caller
	 */
	*req_id = READ_ONCE(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		pr_err("Invalid req id %d\n", *req_id);
		return -EINVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}

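/* ena_com_cq_empty - true when no new completion is pending on this CQ */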
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}