Lines Matching refs:rcb
247 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb) in bnad_rxq_alloc_uninit() argument
249 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; in bnad_rxq_alloc_uninit()
259 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb) in bnad_rxq_alloc_init() argument
261 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; in bnad_rxq_alloc_init()
264 bnad_rxq_alloc_uninit(bnad, rcb); in bnad_rxq_alloc_init()
266 order = get_order(rcb->rxq->buffer_size); in bnad_rxq_alloc_init()
270 if (bna_is_small_rxq(rcb->id)) { in bnad_rxq_alloc_init()
272 unmap_q->map_size = rcb->rxq->buffer_size; in bnad_rxq_alloc_init()
274 if (rcb->rxq->multi_buffer) { in bnad_rxq_alloc_init()
276 unmap_q->map_size = rcb->rxq->buffer_size; in bnad_rxq_alloc_init()
281 (rcb->rxq->buffer_size > 2048) ? in bnad_rxq_alloc_init()
322 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb) in bnad_rxq_cleanup() argument
324 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; in bnad_rxq_cleanup()
327 for (i = 0; i < rcb->q_depth; i++) { in bnad_rxq_cleanup()
335 bnad_rxq_alloc_uninit(bnad, rcb); in bnad_rxq_cleanup()
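bnad_rxq_cleanup() walks every slot up to rcb->q_depth, releases whatever buffer each unmap entry still holds, and then calls bnad_rxq_alloc_uninit() to reset the queue state. A small standalone sketch of that teardown order; the types, the free(), and the fields reset in the uninit step are stand-ins, not the driver's definitions.

#include <stdint.h>
#include <stdlib.h>

struct model_unmap {
	void *buf;			/* skb or page held for this slot */
};

struct model_rcb {
	struct model_unmap *unmap_q;	/* one entry per ring slot */
	uint32_t q_depth;
	unsigned int alloc_order;	/* sizing state chosen at init time */
	unsigned int map_size;
};

/* Models bnad_rxq_alloc_uninit(): forget the sizing chosen at init time. */
static void model_rxq_alloc_uninit(struct model_rcb *rcb)
{
	rcb->alloc_order = 0;
	rcb->map_size = 0;
}

static void model_rxq_cleanup(struct model_rcb *rcb)
{
	uint32_t i;

	/* Release every buffer still parked in the unmap queue... */
	for (i = 0; i < rcb->q_depth; i++) {
		free(rcb->unmap_q[i].buf);
		rcb->unmap_q[i].buf = NULL;
	}
	/* ...then reset the queue bookkeeping, as the last reference shows. */
	model_rxq_alloc_uninit(rcb);
}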
339 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) in bnad_rxq_refill_page() argument
342 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; in bnad_rxq_refill_page()
349 prod = rcb->producer_index; in bnad_rxq_refill_page()
350 q_depth = rcb->q_depth; in bnad_rxq_refill_page()
371 rcb->rxq->rxbuf_alloc_failed++; in bnad_rxq_refill_page()
380 rcb->rxq->rxbuf_map_failed++; in bnad_rxq_refill_page()
395 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; in bnad_rxq_refill_page()
403 rcb->producer_index = prod; in bnad_rxq_refill_page()
405 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) in bnad_rxq_refill_page()
406 bna_rxq_prod_indx_doorbell(rcb); in bnad_rxq_refill_page()
413 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) in bnad_rxq_refill_skb() argument
416 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; in bnad_rxq_refill_skb()
422 buff_sz = rcb->rxq->buffer_size; in bnad_rxq_refill_skb()
423 prod = rcb->producer_index; in bnad_rxq_refill_skb()
424 q_depth = rcb->q_depth; in bnad_rxq_refill_skb()
434 rcb->rxq->rxbuf_alloc_failed++; in bnad_rxq_refill_skb()
443 rcb->rxq->rxbuf_map_failed++; in bnad_rxq_refill_skb()
451 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; in bnad_rxq_refill_skb()
459 rcb->producer_index = prod; in bnad_rxq_refill_skb()
461 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) in bnad_rxq_refill_skb()
462 bna_rxq_prod_indx_doorbell(rcb); in bnad_rxq_refill_skb()
469 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) in bnad_rxq_post() argument
471 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; in bnad_rxq_post()
474 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth); in bnad_rxq_post()
479 bnad_rxq_refill_skb(bnad, rcb, to_alloc); in bnad_rxq_post()
481 bnad_rxq_refill_page(bnad, rcb, to_alloc); in bnad_rxq_post()
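The refill and post references above (bnad_rxq_refill_skb(), bnad_rxq_refill_page(), bnad_rxq_post()) share one producer-side pattern: compute the free slot count from the ring, allocate and map a buffer per slot, bump a failure counter and stop on error, write the descriptor at sw_q[producer_index], advance the index modulo q_depth, and ring the doorbell only while BNAD_RXQ_POST_OK is set. A standalone sketch of that pattern follows, with stand-in types, a malloc() in place of the skb/page allocation and DMA mapping, and a no-op doorbell.

#include <stdint.h>
#include <stdlib.h>

#define MODEL_POST_OK	(1u << 0)	/* stand-in for BNAD_RXQ_POST_OK */

struct model_rxq_entry {
	uint64_t dma_addr;		/* what the hardware descriptor holds */
};

struct model_rcb {
	struct model_rxq_entry *sw_q;	/* software view of the rx ring */
	uint32_t producer_index;
	uint32_t consumer_index;
	uint32_t q_depth;
	unsigned long flags;
	uint32_t rxbuf_alloc_failed;	/* kept on rcb->rxq in the driver */
};

/* Free slots between consumer and producer (models BNA_QE_FREE_CNT). */
static uint32_t model_free_cnt(const struct model_rcb *rcb)
{
	return (rcb->consumer_index - rcb->producer_index - 1 + rcb->q_depth)
		% rcb->q_depth;
}

static void model_doorbell(struct model_rcb *rcb)
{
	(void)rcb;			/* the driver writes a device register */
}

/* One refill pass; bnad_rxq_post() would pick the skb or page variant. */
static uint32_t model_rxq_refill(struct model_rcb *rcb, uint32_t nalloc)
{
	uint32_t prod = rcb->producer_index;
	uint32_t alloced = 0;

	while (nalloc--) {
		void *buf = malloc(2048);	/* skb/page alloc + DMA map */

		if (!buf) {
			rcb->rxbuf_alloc_failed++;	/* retry on next post */
			break;
		}
		rcb->sw_q[prod].dma_addr = (uint64_t)(uintptr_t)buf;
		prod = (prod + 1) % rcb->q_depth;	/* BNA_QE_INDX_INC */
		alloced++;
	}

	if (alloced) {
		rcb->producer_index = prod;
		if (rcb->flags & MODEL_POST_OK)	/* only post while allowed */
			model_doorbell(rcb);
	}
	return alloced;
}

static void model_rxq_post(struct model_rcb *rcb)
{
	uint32_t to_alloc = model_free_cnt(rcb);

	if (to_alloc)
		model_rxq_refill(rcb, to_alloc);
}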
499 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb, in bnad_cq_drop_packet() argument
506 unmap_q = rcb->unmap_q; in bnad_cq_drop_packet()
509 BNA_QE_INDX_INC(ci, rcb->q_depth); in bnad_cq_drop_packet()
521 struct bna_rcb *rcb; in bnad_cq_setup_skb_frags() local
531 rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0]; in bnad_cq_setup_skb_frags()
532 unmap_q = rcb->unmap_q; in bnad_cq_setup_skb_frags()
533 bnad = rcb->bnad; in bnad_cq_setup_skb_frags()
534 ci = rcb->consumer_index; in bnad_cq_setup_skb_frags()
545 BNA_QE_INDX_INC(ci, rcb->q_depth); in bnad_cq_setup_skb_frags()
590 struct bna_rcb *rcb = NULL; in bnad_cq_process() local
620 rcb = ccb->rcb[1]; in bnad_cq_process()
622 rcb = ccb->rcb[0]; in bnad_cq_process()
624 unmap_q = rcb->unmap_q; in bnad_cq_process()
627 sop_ci = rcb->consumer_index; in bnad_cq_process()
683 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs); in bnad_cq_process()
684 rcb->rxq->rx_packets_with_error++; in bnad_cq_process()
694 rcb->rxq->rx_packets++; in bnad_cq_process()
695 rcb->rxq->rx_bytes += totlen; in bnad_cq_process()
720 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth); in bnad_cq_process()
729 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) in bnad_cq_process()
732 bnad_rxq_post(bnad, ccb->rcb[0]); in bnad_cq_process()
733 if (ccb->rcb[1]) in bnad_cq_process()
734 bnad_rxq_post(bnad, ccb->rcb[1]); in bnad_cq_process()
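On the completion side, the bnad_cq_process() references show the consumer pattern: pick ccb->rcb[1] or ccb->rcb[0] depending on whether the completion came from the small RxQ, remember the start-of-packet consumer index, either drop the packet (counting rx_packets_with_error) or account it in rx_packets/rx_bytes, advance consumer_index by the number of vectors consumed, and finally repost both RxQs while BNAD_RXQ_STARTED is set. A compact standalone sketch of the accounting and index advance; the types are stand-ins and the skb build / per-vector walk is omitted.

#include <stdbool.h>
#include <stdint.h>

struct model_rxq_stats {
	uint64_t rx_packets;
	uint64_t rx_bytes;
	uint64_t rx_packets_with_error;
};

struct model_rcb {
	uint32_t consumer_index;
	uint32_t q_depth;
	struct model_rxq_stats stats;	/* rcb->rxq counters in the driver */
};

/* Account one completed frame that used nvecs ring entries. */
static void model_cq_complete_one(struct model_rcb *rcb, uint32_t nvecs,
				  uint32_t totlen, bool error)
{
	if (error) {
		/* Driver drops the buffers via bnad_cq_drop_packet(). */
		rcb->stats.rx_packets_with_error++;
	} else {
		rcb->stats.rx_packets++;
		rcb->stats.rx_bytes += totlen;
	}

	/* Consume every vector of the frame (models BNA_QE_INDX_ADD). */
	rcb->consumer_index = (rcb->consumer_index + nvecs) % rcb->q_depth;
}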
1163 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags); in bnad_cb_rx_stall()
1165 if (ccb->rcb[1]) in bnad_cb_rx_stall()
1166 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags); in bnad_cb_rx_stall()
1198 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]); in bnad_rx_cleanup()
1199 if (rx_ctrl->ccb->rcb[1]) in bnad_rx_cleanup()
1200 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]); in bnad_rx_cleanup()
1222 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags); in bnad_cb_rx_cleanup()
1224 if (ccb->rcb[1]) in bnad_cb_rx_cleanup()
1225 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); in bnad_cb_rx_cleanup()
1236 struct bna_rcb *rcb; in bnad_cb_rx_post() local
1249 rcb = ccb->rcb[j]; in bnad_cb_rx_post()
1250 if (!rcb) in bnad_cb_rx_post()
1253 bnad_rxq_alloc_init(bnad, rcb); in bnad_cb_rx_post()
1254 set_bit(BNAD_RXQ_STARTED, &rcb->flags); in bnad_cb_rx_post()
1255 set_bit(BNAD_RXQ_POST_OK, &rcb->flags); in bnad_cb_rx_post()
1256 bnad_rxq_post(bnad, rcb); in bnad_cb_rx_post()
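bnad_cb_rx_post() brings each RxQ back up in a fixed order: (re)initialize the unmap queue with bnad_rxq_alloc_init(), set BNAD_RXQ_STARTED and BNAD_RXQ_POST_OK, then post buffers with bnad_rxq_post(), skipping a missing rcb. A small sketch of that loop over the (up to two) RCBs of a CCB; the types are stand-ins and the two helpers are hypothetical no-ops standing in for the driver calls.

#include <stddef.h>

#define MODEL_STARTED	(1u << 0)	/* stand-in for BNAD_RXQ_STARTED */
#define MODEL_POST_OK	(1u << 1)	/* stand-in for BNAD_RXQ_POST_OK */

struct model_rcb {
	unsigned long flags;
};

struct model_ccb {
	struct model_rcb *rcb[2];	/* large RxQ, optional small RxQ */
};

/* Hypothetical stand-ins for bnad_rxq_alloc_init() / bnad_rxq_post(). */
static void model_rxq_alloc_init(struct model_rcb *rcb) { (void)rcb; }
static void model_rxq_post(struct model_rcb *rcb) { (void)rcb; }

static void model_cb_rx_post(struct model_ccb *ccb)
{
	int j;

	for (j = 0; j < 2; j++) {
		struct model_rcb *rcb = ccb->rcb[j];

		if (!rcb)
			continue;	/* the second (small) RxQ may not exist */

		model_rxq_alloc_init(rcb);
		rcb->flags |= MODEL_STARTED;	/* set_bit() in the driver */
		rcb->flags |= MODEL_POST_OK;
		model_rxq_post(rcb);
	}
}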
2394 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets; in bnad_netdev_qstats_fill()
2396 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes; in bnad_netdev_qstats_fill()
2397 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && in bnad_netdev_qstats_fill()
2399 rcb[1]->rxq) { in bnad_netdev_qstats_fill()
2402 ccb->rcb[1]->rxq->rx_packets; in bnad_netdev_qstats_fill()
2405 ccb->rcb[1]->rxq->rx_bytes; in bnad_netdev_qstats_fill()
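bnad_netdev_qstats_fill() sums per-queue counters into the netdev stats: rcb[0]->rxq always contributes rx_packets and rx_bytes, and rcb[1]->rxq is added only when the second (small) RxQ and its rxq actually exist, matching the NULL checks visible above. A minimal sketch of that aggregation with stand-in types; the surrounding ccb/rx_ctrl iteration is simplified to a flat array.

#include <stddef.h>
#include <stdint.h>

struct model_rxq {
	uint64_t rx_packets;
	uint64_t rx_bytes;
};

struct model_rcb {
	struct model_rxq *rxq;
};

struct model_ccb {
	struct model_rcb *rcb[2];	/* rcb[1] (small RxQ) may be NULL */
};

struct model_stats {
	uint64_t rx_packets;
	uint64_t rx_bytes;
};

static void model_qstats_fill(struct model_stats *stats,
			      struct model_ccb **ccbs, int nccbs)
{
	int j;

	for (j = 0; j < nccbs; j++) {
		struct model_ccb *ccb = ccbs[j];

		if (!ccb || !ccb->rcb[0] || !ccb->rcb[0]->rxq)
			continue;

		stats->rx_packets += ccb->rcb[0]->rxq->rx_packets;
		stats->rx_bytes   += ccb->rcb[0]->rxq->rx_bytes;

		/* The second RxQ only counts if both it and its rxq exist. */
		if (ccb->rcb[1] && ccb->rcb[1]->rxq) {
			stats->rx_packets += ccb->rcb[1]->rxq->rx_packets;
			stats->rx_bytes   += ccb->rcb[1]->rxq->rx_bytes;
		}
	}
}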