Lines matching refs:lbq_desc
1033 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx]; in ql_get_curr_lbuf() local
1038 return lbq_desc; in ql_get_curr_lbuf()
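
The two hits at 1033 and 1038 bracket ql_get_curr_lbuf(): it hands back the descriptor at lbq_curr_idx and advances the ring. A minimal sketch of the helper, assuming the ring wraps at an lbq_len field and counts consumed slots in lbq_free_cnt (both names are inferred, not shown in the hits above):

    static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
    {
            struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];

            /* Advance the consumer index, wrapping at the ring length
             * (lbq_len and lbq_free_cnt are assumed field names).
             */
            rx_ring->lbq_curr_idx++;
            if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
                    rx_ring->lbq_curr_idx = 0;
            rx_ring->lbq_free_cnt++;    /* one more slot for the refill path */
            return lbq_desc;
    }
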
1044 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); in ql_get_curr_lchunk() local
1047 dma_unmap_addr(lbq_desc, mapaddr), in ql_get_curr_lchunk()
1054 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) in ql_get_curr_lchunk()
1057 lbq_desc->p.pg_chunk.map, in ql_get_curr_lchunk()
1060 return lbq_desc; in ql_get_curr_lchunk()
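
Hits 1044 through 1060 show ql_get_curr_lchunk() fetching the current descriptor, syncing its DMA mapping for CPU access, and unmapping the backing page once its last chunk is consumed. A sketch under those assumptions; ql_lbq_block_size() appears to return the size of the master page block, and the exact sync/unmap calls are inferred from the dma_unmap_addr()/pg_chunk.map usage above:

    static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
                                              struct rx_ring *rx_ring)
    {
            struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

            dma_sync_single_for_cpu(&qdev->pdev->dev,
                                    dma_unmap_addr(lbq_desc, mapaddr),
                                    rx_ring->lbq_buf_size, DMA_FROM_DEVICE);

            /* If this chunk ends the master page, drop its mapping. */
            if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) ==
                ql_lbq_block_size(qdev))
                    dma_unmap_page(&qdev->pdev->dev, lbq_desc->p.pg_chunk.map,
                                   ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
            return lbq_desc;
    }
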
1091 struct bq_desc *lbq_desc) in ql_get_next_chunk() argument
1121 lbq_desc->p.pg_chunk = rx_ring->pg_chunk; in ql_get_next_chunk()
1129 lbq_desc->p.pg_chunk.last_flag = 1; in ql_get_next_chunk()
1133 lbq_desc->p.pg_chunk.last_flag = 0; in ql_get_next_chunk()
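
Hits 1091 through 1133 outline ql_get_next_chunk(): the ring keeps one master page in rx_ring->pg_chunk, copies it into the passed descriptor, and marks the descriptor with last_flag = 1 when the page is exhausted (so exactly one descriptor owns the unmap), or last_flag = 0 after taking an extra page reference. A sketch of the carving step; the allocation path is elided and the field names beyond those in the hits are assumptions:

    static int ql_get_next_chunk(struct ql_adapter *qdev,
                                 struct rx_ring *rx_ring,
                                 struct bq_desc *lbq_desc)
    {
            /* (allocation and mapping of a fresh master page elided) */
            lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

            /* Advance the master chunk for the next caller. */
            rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
            if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
                    rx_ring->pg_chunk.page = NULL;
                    lbq_desc->p.pg_chunk.last_flag = 1;  /* owns the unmap */
            } else {
                    rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
                    get_page(rx_ring->pg_chunk.page);
                    lbq_desc->p.pg_chunk.last_flag = 0;
            }
            return 0;
    }
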
1142 struct bq_desc *lbq_desc; in ql_update_lbq() local
1151 lbq_desc = &rx_ring->lbq[clean_idx]; in ql_update_lbq()
1152 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { in ql_update_lbq()
1160 map = lbq_desc->p.pg_chunk.map + in ql_update_lbq()
1161 lbq_desc->p.pg_chunk.offset; in ql_update_lbq()
1162 dma_unmap_addr_set(lbq_desc, mapaddr, map); in ql_update_lbq()
1163 dma_unmap_len_set(lbq_desc, maplen, in ql_update_lbq()
1165 *lbq_desc->addr = cpu_to_le64(map); in ql_update_lbq()
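
Hits 1142 through 1165 are the refill path, ql_update_lbq(): each clean descriptor gets a fresh chunk, its DMA bookkeeping is recorded with dma_unmap_addr_set()/dma_unmap_len_set(), and the bus address is written little-endian into the hardware-visible entry at lbq_desc->addr. The per-descriptor body of that loop reduces to this sketch (the surrounding batch loop and index wrap are assumed):

    lbq_desc = &rx_ring->lbq[clean_idx];
    if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
            rx_ring->lbq_clean_idx = clean_idx;
            return;                         /* out of memory: retry later */
    }

    /* Record the chunk's bus address for the later unmap ... */
    map = lbq_desc->p.pg_chunk.map + lbq_desc->p.pg_chunk.offset;
    dma_unmap_addr_set(lbq_desc, mapaddr, map);
    dma_unmap_len_set(lbq_desc, maplen, rx_ring->lbq_buf_size);

    /* ... and publish it to the hardware queue entry. */
    *lbq_desc->addr = cpu_to_le64(map);
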
1498 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_process_mac_rx_gro_page() local
1504 put_page(lbq_desc->p.pg_chunk.page); in ql_process_mac_rx_gro_page()
1514 put_page(lbq_desc->p.pg_chunk.page); in ql_process_mac_rx_gro_page()
1517 prefetch(lbq_desc->p.pg_chunk.va); in ql_process_mac_rx_gro_page()
1519 lbq_desc->p.pg_chunk.page, in ql_process_mac_rx_gro_page()
1520 lbq_desc->p.pg_chunk.offset, in ql_process_mac_rx_gro_page()
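
Hits 1498 through 1526 cover the GRO receive path: on a frame error or a failed skb allocation the chunk's page reference is dropped with put_page(); otherwise the chunk is attached to the skb as a page fragment. A sketch of the success path, assuming the fragment is appended at skb_shinfo(skb)->nr_frags and handed to napi_gro_frags() (those calls are inferred, not shown in the hits):

    skb = napi_get_frags(napi);
    if (!skb) {
            put_page(lbq_desc->p.pg_chunk.page);    /* drop our reference */
            return;
    }
    prefetch(lbq_desc->p.pg_chunk.va);
    __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                         lbq_desc->p.pg_chunk.page,
                         lbq_desc->p.pg_chunk.offset, length);
    skb->len += length;
    skb->data_len += length;
    skb->truesize += length;
    skb_shinfo(skb)->nr_frags++;
    napi_gro_frags(napi);
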
1547 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_process_mac_rx_page() local
1554 put_page(lbq_desc->p.pg_chunk.page); in ql_process_mac_rx_page()
1558 addr = lbq_desc->p.pg_chunk.va; in ql_process_mac_rx_page()
1583 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, in ql_process_mac_rx_page()
1584 lbq_desc->p.pg_chunk.offset + hlen, in ql_process_mac_rx_page()
1627 put_page(lbq_desc->p.pg_chunk.page); in ql_process_mac_rx_page()
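
Hits 1547 through 1627 show the non-GRO page path, ql_process_mac_rx_page(): the chunk's virtual address is used to copy the packet headers into the skb's linear area, while the payload stays in the page, attached as fragment 0 at offset + hlen; the error paths again release the page reference. A sketch of the core, with hlen (the copied header length) assumed to come from the IOCB response:

    addr = lbq_desc->p.pg_chunk.va;
    skb_put_data(skb, addr, hlen);          /* headers into the linear area */
    skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
                       lbq_desc->p.pg_chunk.offset + hlen,
                       length - hlen);      /* payload stays in the page */
    skb->len += length - hlen;
    skb->data_len += length - hlen;
    skb->truesize += length - hlen;
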
1762 struct bq_desc *lbq_desc; in ql_build_rx_skb() local
1853 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1856 lbq_desc->p.pg_chunk.offset, length); in ql_build_rx_skb()
1857 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, in ql_build_rx_skb()
1858 lbq_desc->p.pg_chunk.offset, in ql_build_rx_skb()
1869 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1877 dma_unmap_addr(lbq_desc, in ql_build_rx_skb()
1879 dma_unmap_len(lbq_desc, maplen), in ql_build_rx_skb()
1886 lbq_desc->p.pg_chunk.page, in ql_build_rx_skb()
1887 lbq_desc->p.pg_chunk.offset, in ql_build_rx_skb()
1893 lbq_desc->p.pg_chunk.va, in ql_build_rx_skb()
1933 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1941 lbq_desc->p.pg_chunk.page, in ql_build_rx_skb()
1942 lbq_desc->p.pg_chunk.offset, in ql_build_rx_skb()
1950 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va, in ql_build_rx_skb()
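
The ql_build_rx_skb() hits (1762 through 1950) cover three large-buffer cases: data split between a small buffer and a page chunk (1853-1858), the whole frame in one chunk (1869-1893, where the chunk's recorded dma_unmap_addr()/dma_unmap_len() mapping is unmapped before use), and a frame spanning several chunks stitched together as successive fragments (1933-1950). The multi-chunk case reduces to a loop like this sketch (the fragment index i and the per-chunk size computation are assumptions):

    while (length > 0) {
            lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
            size = min_t(u32, length, rx_ring->lbq_buf_size);

            /* Chain each chunk onto the skb as the next fragment. */
            skb_fill_page_desc(skb, i, lbq_desc->p.pg_chunk.page,
                               lbq_desc->p.pg_chunk.offset, size);
            skb->len += size;
            skb->data_len += size;
            skb->truesize += size;
            length -= size;
            i++;
    }
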
2824 struct bq_desc *lbq_desc; in ql_free_lbq_buffers() local
2831 lbq_desc = &rx_ring->lbq[curr_idx]; in ql_free_lbq_buffers()
2833 if (lbq_desc->p.pg_chunk.last_flag) { in ql_free_lbq_buffers()
2835 lbq_desc->p.pg_chunk.map, in ql_free_lbq_buffers()
2838 lbq_desc->p.pg_chunk.last_flag = 0; in ql_free_lbq_buffers()
2841 put_page(lbq_desc->p.pg_chunk.page); in ql_free_lbq_buffers()
2842 lbq_desc->p.pg_chunk.page = NULL; in ql_free_lbq_buffers()
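
Hits 2824 through 2842 show teardown, ql_free_lbq_buffers(): the driver walks the ring, unmaps the master page exactly once via the descriptor that carries last_flag, then drops each descriptor's page reference. A sketch of the loop body, assuming curr_idx walks the unconsumed descriptors:

    lbq_desc = &rx_ring->lbq[curr_idx];

    /* Only the last chunk of a master page owns the DMA mapping. */
    if (lbq_desc->p.pg_chunk.last_flag) {
            dma_unmap_page(&qdev->pdev->dev, lbq_desc->p.pg_chunk.map,
                           ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
            lbq_desc->p.pg_chunk.last_flag = 0;
    }

    put_page(lbq_desc->p.pg_chunk.page);    /* release this chunk's reference */
    lbq_desc->p.pg_chunk.page = NULL;
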
2912 struct bq_desc *lbq_desc; in ql_init_lbq_ring() local
2917 lbq_desc = &rx_ring->lbq[i]; in ql_init_lbq_ring()
2918 memset(lbq_desc, 0, sizeof(*lbq_desc)); in ql_init_lbq_ring()
2919 lbq_desc->index = i; in ql_init_lbq_ring()
2920 lbq_desc->addr = bq; in ql_init_lbq_ring()
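
Finally, hits 2912 through 2920 are ql_init_lbq_ring(): every descriptor is zeroed, given its ring index, and pointed at its hardware queue entry, so the refill path can later write the bus address through lbq_desc->addr. A sketch, assuming bq walks an array of __le64 entries starting at an lbq_base field:

    static void ql_init_lbq_ring(struct ql_adapter *qdev,
                                 struct rx_ring *rx_ring)
    {
            struct bq_desc *lbq_desc;
            __le64 *bq = rx_ring->lbq_base;    /* assumed field name */
            int i;

            for (i = 0; i < rx_ring->lbq_len; i++, bq++) {
                    lbq_desc = &rx_ring->lbq[i];
                    memset(lbq_desc, 0, sizeof(*lbq_desc));
                    lbq_desc->index = i;
                    lbq_desc->addr = bq;       /* hardware-visible queue entry */
            }
    }
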