Lines matching the search "+full:1 +full:q" in the mt76 DMA code (drivers/net/wireless/mediatek/mt76/dma.c). Each function is shown as a fragment: only the matched lines are reproduced, elided spans are marked with "...", and obviously implied structure (braces, guard returns, loop headers) is restored from the upstream file.
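/* mt76_dma_sync_idx(): program the descriptor ring base and size into the
 * queue registers and resynchronize the software head/tail with the hardware
 * DMA index.
 */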
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
        writel(q->desc_dma, &q->regs->desc_base);
        writel(q->ndesc, &q->regs->ring_size);
        q->head = readl(&q->regs->dma_idx);
        q->tail = q->head;
}
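/* mt76_dma_queue_reset(): mark every descriptor as DMA-done, zero the CPU and
 * DMA index registers, and resync the software ring state.
 */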
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
        int i;

        if (!q)
                return;

        for (i = 0; i < q->ndesc; i++)
                q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

        writel(0, &q->regs->cpu_idx);
        writel(0, &q->regs->dma_idx);
        mt76_dma_sync_idx(dev, q);
}
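/* mt76_dma_alloc_queue(): allocate the coherent descriptor ring and the
 * per-descriptor entry array for one queue, map its registers, and reset it.
 */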
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
                     int idx, int n_desc, int bufsize,
                     u32 ring_base)
{
        int size;

        spin_lock_init(&q->lock);
        spin_lock_init(&q->cleanup_lock);

        q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
        q->ndesc = n_desc;
        q->buf_size = bufsize;
        q->hw_idx = idx;

        size = q->ndesc * sizeof(struct mt76_desc);
        q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
        if (!q->desc)
                return -ENOMEM;

        size = q->ndesc * sizeof(*q->entry);
        q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        mt76_dma_queue_reset(dev, q);
        return 0;
}
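/* mt76_dma_add_buf(): attach up to two buffers per hardware descriptor,
 * advance the ring head, and record skb/txwi ownership on the last entry so
 * completion can unmap and free them later.
 */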
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
                 struct mt76_queue_buf *buf, int nbufs, u32 info,
                 struct sk_buff *skb, void *txwi)
{
        ...
        int i, idx = -1;

        if (txwi) {
                q->entry[q->head].txwi = DMA_DUMMY_DATA;
                q->entry[q->head].skip_buf0 = true;
        }

        for (i = 0; i < nbufs; i += 2, buf += 2) {
                ...
                idx = q->head;
                q->head = (q->head + 1) % q->ndesc;

                desc = &q->desc[idx];
                entry = &q->entry[idx];
                ...
                entry->skip_buf1 = i == nbufs - 1;
                ...
                if (i < nbufs - 1) {
                        entry->dma_addr[1] = buf[1].addr;
                        entry->dma_len[1] = buf[1].len;
                        buf1 = buf[1].addr;
                        ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
                        if (buf[1].skip_unmap)
                                entry->skip_buf1 = true;
                }

                if (i == nbufs - 1)
                        ...
                q->queued++;
        }

        q->entry[idx].txwi = txwi;
        q->entry[idx].skb = skb;
        q->entry[idx].wcid = 0xffff;
        ...
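/* mt76_dma_tx_cleanup_idx(): unmap the buffer(s) of one completed TX
 * descriptor and copy the queue entry out for the caller to complete.
 */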
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                        struct mt76_queue_entry *prev_e)
{
        struct mt76_queue_entry *e = &q->entry[idx];
        ...
        dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
                         DMA_TO_DEVICE);
        ...
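/* mt76_dma_kick_queue(): tell the hardware about newly queued descriptors by
 * writing the software head index to the CPU index register.
 */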
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
        ...
        writel(q->head, &q->regs->cpu_idx);
}
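/* mt76_dma_tx_cleanup(): reclaim completed TX descriptors up to the hardware
 * DMA index (or everything when flushing), then resync and re-kick the ring
 * and wake any waiters once the queue drains.
 */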
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
        ...
        if (!q)
                return;

        spin_lock_bh(&q->cleanup_lock);
        if (flush)
                last = -1;
        else
                last = readl(&q->regs->dma_idx);

        while (q->queued > 0 && q->tail != last) {
                mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
                mt76_queue_tx_complete(dev, q, &entry);
                ...
                if (!flush && q->tail == last)
                        last = readl(&q->regs->dma_idx);
        }
        spin_unlock_bh(&q->cleanup_lock);

        if (flush) {
                spin_lock_bh(&q->lock);
                mt76_dma_sync_idx(dev, q);
                mt76_dma_kick_queue(dev, q);
                spin_unlock_bh(&q->lock);
        }

        if (!q->queued)
                wake_up(&dev->tx_wait);
}
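/* mt76_dma_get_buf(): detach and unmap the RX buffer behind a completed
 * descriptor and return its virtual address together with the reported
 * length and info word.
 */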
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                 int *len, u32 *info, bool *more)
{
        struct mt76_queue_entry *e = &q->entry[idx];
        struct mt76_desc *desc = &q->desc[idx];
        ...
        int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
        ...
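/* mt76_dma_dequeue(): pop the buffer at the ring tail once the hardware has
 * marked its descriptor done, or unconditionally when flushing.
 */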
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
                 int *len, u32 *info, bool *more)
{
        int idx = q->tail;

        if (!q->queued)
                return NULL;

        if (flush)
                q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
        else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
                return NULL;

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;
        return mt76_dma_get_buf(dev, q, idx, len, info, more);
}
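/* mt76_dma_tx_queue_skb_raw(): queue an already-built frame (used for MCU
 * messages) as a single DMA buffer and kick the hardware.
 */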
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
                          struct sk_buff *skb, u32 tx_info)
{
        ...
        if (q->queued + 1 >= q->ndesc - 1)
                ...

        spin_lock_bh(&q->lock);
        mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
        mt76_dma_kick_queue(dev, q);
        spin_unlock_bh(&q->lock);
        ...
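/* mt76_dma_tx_queue_skb(): map and enqueue a regular TX frame; the ring-full
 * check reserves one descriptor per buffer pair, and the driver's
 * tx_prepare_skb() hook fills the hardware TXWI before the buffers are added.
 */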
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
                      ...)
{
        ...
        if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
                ...
        }

        ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
        ...
        return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf, ...);
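/* mt76_dma_rx_fill(): refill the RX ring with page-fragment buffers until it
 * holds ndesc - 1 entries, then kick the hardware if anything was added.
 */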
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
        ...
        int len = SKB_WITH_OVERHEAD(q->buf_size);
        int offset = q->buf_offset;

        spin_lock_bh(&q->lock);
        while (q->queued < q->ndesc - 1) {
                buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
                ...
                mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
                ...
        }

        if (frames)
                mt76_dma_kick_queue(dev, q);
        spin_unlock_bh(&q->lock);
        ...
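/* mt76_dma_rx_cleanup(): drain every buffer still queued in the RX ring and
 * release the page-fragment cache that backed them.
 */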
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
        ...
        spin_lock_bh(&q->lock);
        do {
                buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
                if (!buf)
                        break;
                skb_free_frag(buf);
        } while (1);
        spin_unlock_bh(&q->lock);

        if (!q->rx_page.va)
                return;

        page = virt_to_page(q->rx_page.va);
        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
        memset(&q->rx_page, 0, sizeof(q->rx_page));
}
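/* mt76_dma_rx_reset(): mark all RX descriptors done, drop the queued buffers,
 * resync and refill the ring, and discard any partially reassembled frame.
 */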
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
        struct mt76_queue *q = &dev->q_rx[qid];
        int i;

        for (i = 0; i < q->ndesc; i++)
                q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

        mt76_dma_rx_cleanup(dev, q);
        mt76_dma_sync_idx(dev, q);
        mt76_dma_rx_fill(dev, q);

        if (!q->rx_head)
                return;

        dev_kfree_skb(q->rx_head);
        q->rx_head = NULL;
}
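/* mt76_add_fragment(): append a follow-up RX buffer to the skb being
 * reassembled in q->rx_head and pass the frame to the driver's rx_skb()
 * hook once the final fragment (more == false) has arrived.
 */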
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
                  int len, bool more)
{
        struct sk_buff *skb = q->rx_head;
        ...
        int offset = data - page_address(page) + q->buf_offset;

        skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
        ...
        q->rx_head = NULL;
        ...
        dev->drv->rx_skb(dev, q - dev->q_rx, skb);
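/* mt76_dma_rx_process(): the RX poll loop; dequeue completed buffers, build
 * skbs (reassembling multi-buffer frames via q->rx_head), hand them to the
 * driver's rx_skb() hook, and refill the ring when done.
 */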
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
        ...
        while (done < budget) {
                data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
                if (!data)
                        break;

                if (q->rx_head)
                        data_len = q->buf_size;
                else
                        data_len = SKB_WITH_OVERHEAD(q->buf_size);

                if (data_len < len + q->buf_offset) {
                        dev_kfree_skb(q->rx_head);
                        q->rx_head = NULL;
                        ...
                }

                if (q->rx_head) {
                        mt76_add_fragment(dev, q, data, len, more);
                        continue;
                }

                skb = build_skb(data, q->buf_size);
                ...
                skb_reserve(skb, q->buf_offset);

                if (q == &dev->q_rx[MT_RXQ_MCU]) {
                        ...
                }
                ...
                if (more) {
                        q->rx_head = skb;
                        continue;
                }

                dev->drv->rx_skb(dev, q - dev->q_rx, skb);
        }

        mt76_dma_rx_fill(dev, q);
        ...
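/* mt76_dma_init() enables threaded NAPI on the dummy napi_dev, so RX polling
 * runs in a kernel thread rather than in softirq context.
 */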
        dev->napi_dev.threaded = 1;