Lines matching refs:rq, grouped by function (source lines not matched by the search are elided as /* ... */):

In vnic_rq_alloc_bufs():

static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
        struct vnic_rq_buf *buf;
        unsigned int i, j, count = rq->ring.desc_count;

        /* ... for each buffer-tracking block i: one allocation per block ... */
        rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
        if (!rq->bufs[i]) {
                /* ... allocation failed: return -ENOMEM ... */
        }

        /* ... for each buf j within each block i: attach the buf to its
         * hardware descriptor and link it into one circular list ... */
        buf = rq->bufs[i];
        buf->desc = (u8 *)rq->ring.descs +
                rq->ring.desc_size * buf->index;
        if (buf->index + 1 == count)
                buf->next = rq->bufs[0];        /* last buf wraps to the first */
        else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES)
                buf->next = rq->bufs[i + 1];    /* block boundary: chain to next block */
        /* ... otherwise buf->next points at the adjacent buf in the same block ... */

        /* start the software pointers at the head of the ring */
        rq->to_use = rq->to_clean = rq->bufs[0];
        rq->buf_index = 0;

        return 0;
}
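The fragment above builds the receive queue's buffer-tracking ring: bufs are allocated in fixed-size blocks, each buf is pointed at its hardware descriptor, and the last buf links back to the first so the list is circular. The stand-alone sketch below reproduces just that block/index math and linking in user space; the 64-entry block size, the struct layout, and all names are assumptions for illustration, not the driver's definitions.

#include <stdio.h>
#include <stdlib.h>

#define BLK_ENTRIES 64                          /* assumed entries per tracking block */
#define DESC_COUNT  100                         /* assumed descriptor ring size */
#define BLKS_NEEDED ((DESC_COUNT + BLK_ENTRIES - 1) / BLK_ENTRIES)

struct buf {
        unsigned int index;                     /* flat descriptor index */
        struct buf *next;                       /* circular link */
};

int main(void)
{
        struct buf *bufs[BLKS_NEEDED];
        struct buf *b;
        unsigned int i, j;

        /* one allocation per block, mirroring the per-block kzalloc() above */
        for (i = 0; i < BLKS_NEEDED; i++) {
                bufs[i] = calloc(BLK_ENTRIES, sizeof(struct buf));
                if (!bufs[i])
                        return 1;
        }

        /* link every entry into one circular list covering DESC_COUNT descriptors */
        for (i = 0; i < BLKS_NEEDED; i++) {
                b = bufs[i];
                for (j = 0; j < BLK_ENTRIES; j++) {
                        b->index = i * BLK_ENTRIES + j;
                        if (b->index + 1 == DESC_COUNT) {
                                b->next = bufs[0];      /* last entry wraps to the first */
                                goto done;
                        } else if (j + 1 == BLK_ENTRIES) {
                                b->next = bufs[i + 1];  /* block boundary: jump to next block */
                        } else {
                                b->next = b + 1;        /* stay inside the block */
                                b++;
                        }
                }
        }
done:
        /* walk the ring once to show it really is circular */
        b = bufs[0];
        for (i = 0; i < DESC_COUNT; i++)
                b = b->next;
        printf("after %d hops we are back at index %u\n", DESC_COUNT, b->index);

        for (i = 0; i < BLKS_NEEDED; i++)
                free(bufs[i]);
        return 0;
}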
In vnic_rq_free():

void vnic_rq_free(struct vnic_rq *rq)
{
        vdev = rq->vdev;

        vnic_dev_free_desc_ring(vdev, &rq->ring);

        /* ... free every buffer-tracking block i ... */
        kfree(rq->bufs[i]);
        rq->bufs[i] = NULL;

        rq->ctrl = NULL;
}
In vnic_rq_alloc():

int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
        unsigned int desc_count, unsigned int desc_size)
{
        int err;

        rq->index = index;
        rq->vdev = vdev;

        /* map this queue's control registers */
        rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
        if (!rq->ctrl) {
                /* ... no such resource: return an error ... */
        }

        vnic_rq_disable(rq);

        err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
        /* ... on failure, return err ... */

        err = vnic_rq_alloc_bufs(rq);
        if (err)
                vnic_rq_free(rq);       /* undo the descriptor ring allocation */
        /* ... */
}
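vnic_rq_alloc() follows a fixed ordering: record the queue identity, look up the control registers, quiesce the queue, allocate the descriptor ring, then the tracking bufs, and call vnic_rq_free() to unwind if that last step fails. The generic user-space sketch below shows the same allocate-then-unwind ordering; the queue struct and the alloc_ring()/alloc_bufs() stubs are hypothetical stand-ins, not the driver's functions.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the ring and buf allocations. */
struct queue {
        void *ring;
        void *bufs;
};

static int alloc_ring(struct queue *q)
{
        q->ring = malloc(4096);
        return q->ring ? 0 : -1;
}

static int alloc_bufs(struct queue *q)
{
        q->bufs = malloc(1024);
        return q->bufs ? 0 : -1;
}

/* Releases whatever was built; safe to call after a partial setup. */
static void queue_free(struct queue *q)
{
        free(q->bufs);
        free(q->ring);
        q->bufs = q->ring = NULL;
}

/* Same shape as the ordering above: ring first, bufs second,
 * full teardown if the later step fails. */
static int queue_alloc(struct queue *q)
{
        int err;

        err = alloc_ring(q);
        if (err)
                return err;

        err = alloc_bufs(q);
        if (err) {
                queue_free(q);
                return err;
        }
        return 0;
}

int main(void)
{
        struct queue q = { 0 };

        if (queue_alloc(&q) == 0) {
                printf("queue set up\n");
                queue_free(&q);
        }
        return 0;
}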
In vnic_rq_init():

void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
        unsigned int error_interrupt_enable,
        unsigned int error_interrupt_offset)
{
        u64 paddr;
        u32 fetch_index;

        /* program the queue's control registers */
        paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
        writeq(paddr, &rq->ctrl->ring_base);
        iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
        iowrite32(cq_index, &rq->ctrl->cq_index);
        iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
        iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
        iowrite32(0, &rq->ctrl->dropped_packet_count);
        iowrite32(0, &rq->ctrl->error_status);

        /* resynchronize the software pointers with the hardware fetch_index */
        fetch_index = ioread32(&rq->ctrl->fetch_index);
        rq->to_use = rq->to_clean =
                &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
                        [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
        iowrite32(fetch_index, &rq->ctrl->posted_index);

        rq->buf_index = 0;
}
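The divide/modulo pair above is how a flat hardware index is turned back into a buf pointer: the quotient picks the allocation block and the remainder picks the entry inside it, so to_use and to_clean land on exactly the descriptor the hardware will fetch next. A tiny sketch of that recovery follows; the 64-entry block size is an assumed value for illustration.

#include <stdio.h>

#define BLK_ENTRIES 64          /* assumed entries per tracking block */

/* Recover (block, entry) from a flat descriptor index, like
 * bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
 *     [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES] above. */
static void locate(unsigned int fetch_index)
{
        unsigned int blk   = fetch_index / BLK_ENTRIES;
        unsigned int entry = fetch_index % BLK_ENTRIES;

        printf("fetch_index %3u -> bufs[%u][%u]\n", fetch_index, blk, entry);
}

int main(void)
{
        locate(0);
        locate(63);
        locate(64);
        locate(99);
        return 0;
}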
In vnic_rq_error_status():

unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
        return ioread32(&rq->ctrl->error_status);
}
In vnic_rq_enable():

void vnic_rq_enable(struct vnic_rq *rq)
{
        iowrite32(1, &rq->ctrl->enable);
}
In vnic_rq_disable():

int vnic_rq_disable(struct vnic_rq *rq)
{
        iowrite32(0, &rq->ctrl->enable);
        /* ... poll briefly until the hardware reports the queue stopped ... */
        if (!(ioread32(&rq->ctrl->running)))
                return 0;
        /* ... on timeout: ... */
        printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
        return -ETIMEDOUT;
}
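vnic_rq_disable() clears the enable register and then polls the running register until the hardware acknowledges, giving up after a bounded number of attempts. The user-space sketch below shows the same bounded-poll-with-timeout pattern; the retry count, the delay, and the simulated running flag are assumptions, not the driver's values.

#include <stdio.h>
#include <unistd.h>
#include <errno.h>

/* Stand-in for the hardware 'running' bit; in the driver this is
 * ioread32(&rq->ctrl->running). A counter fakes the hardware taking
 * a few polls to stop. */
static int fake_running_reads = 3;

static unsigned int read_running(void)
{
        return fake_running_reads-- > 0;
}

/* Bounded poll: wait up to 'tries' iterations for the queue to stop. */
static int wait_for_stop(unsigned int tries)
{
        unsigned int wait;

        for (wait = 0; wait < tries; wait++) {
                if (!read_running())
                        return 0;       /* hardware acknowledged the disable */
                usleep(1);              /* brief pause between polls */
        }
        return -ETIMEDOUT;              /* never stopped: report a timeout */
}

int main(void)
{
        if (wait_for_stop(100) == 0)
                printf("queue stopped\n");
        else
                printf("failed to disable queue\n");
        return 0;
}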
In vnic_rq_clean():

void vnic_rq_clean(struct vnic_rq *rq,
        void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
        struct vnic_rq_buf *buf;
        u32 fetch_index;

        /* the queue must already be disabled */
        WARN_ON(ioread32(&rq->ctrl->enable));

        /* hand every still-posted buffer back to the caller's cleanup hook */
        buf = rq->to_clean;
        while (vnic_rq_desc_used(rq) > 0) {
                (*buf_clean)(rq, buf);
                buf = rq->to_clean = buf->next;
                rq->ring.desc_avail++;
        }

        /* resynchronize the software pointers with the hardware fetch_index */
        fetch_index = ioread32(&rq->ctrl->fetch_index);
        rq->to_use = rq->to_clean =
                &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
                        [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
        iowrite32(fetch_index, &rq->ctrl->posted_index);

        rq->buf_index = 0;

        vnic_dev_clear_desc_ring(&rq->ring);
}
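vnic_rq_clean() drains whatever is still posted by walking to_clean around the circular buf list, invoking the caller's buf_clean callback for each descriptor and returning the slot to desc_avail, then resynchronizes with the hardware fetch_index. The self-contained model below sketches just that drain loop; the ring size, the used-count bookkeeping, and the callback are simplified assumptions, not the driver's definitions.

#include <stdio.h>

#define RING_SIZE 8                             /* assumed descriptor count */

struct buf {
        unsigned int index;
        struct buf *next;
};

struct rq {
        struct buf bufs[RING_SIZE];             /* a single block, for simplicity */
        struct buf *to_clean;
        unsigned int desc_avail;                /* free descriptor slots */
};

static unsigned int desc_used(struct rq *rq)
{
        /* one slot kept unused, as ring drivers commonly do */
        return RING_SIZE - 1 - rq->desc_avail;
}

/* Caller-supplied cleanup, in the spirit of the buf_clean callback. */
static void buf_clean(struct rq *rq, struct buf *buf)
{
        (void)rq;
        printf("cleaning descriptor %u\n", buf->index);
}

static void rq_clean(struct rq *rq, void (*clean)(struct rq *, struct buf *))
{
        struct buf *buf = rq->to_clean;

        while (desc_used(rq) > 0) {
                clean(rq, buf);
                buf = rq->to_clean = buf->next;
                rq->desc_avail++;
        }
}

int main(void)
{
        struct rq rq;
        unsigned int i;

        for (i = 0; i < RING_SIZE; i++) {
                rq.bufs[i].index = i;
                rq.bufs[i].next = &rq.bufs[(i + 1) % RING_SIZE];
        }
        rq.to_clean = &rq.bufs[0];
        rq.desc_avail = RING_SIZE - 1 - 3;      /* pretend 3 descriptors are outstanding */

        rq_clean(&rq, buf_clean);
        printf("desc_avail after clean: %u\n", rq.desc_avail);
        return 0;
}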