Lines matching the identifier q (full matches) in the mt76 DMA queue code; each match is shown with its source line number and the function it belongs to.

81 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,  in mt76_dma_alloc_queue()  argument
88 spin_lock_init(&q->lock); in mt76_dma_alloc_queue()
90 q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; in mt76_dma_alloc_queue()
91 q->ndesc = n_desc; in mt76_dma_alloc_queue()
92 q->buf_size = bufsize; in mt76_dma_alloc_queue()
93 q->hw_idx = idx; in mt76_dma_alloc_queue()
95 size = q->ndesc * sizeof(struct mt76_desc); in mt76_dma_alloc_queue()
96 q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL); in mt76_dma_alloc_queue()
97 if (!q->desc) in mt76_dma_alloc_queue()
100 size = q->ndesc * sizeof(*q->entry); in mt76_dma_alloc_queue()
101 q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); in mt76_dma_alloc_queue()
102 if (!q->entry) in mt76_dma_alloc_queue()
106 for (i = 0; i < q->ndesc; i++) in mt76_dma_alloc_queue()
107 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_alloc_queue()
109 writel(q->desc_dma, &q->regs->desc_base); in mt76_dma_alloc_queue()
110 writel(0, &q->regs->cpu_idx); in mt76_dma_alloc_queue()
111 writel(0, &q->regs->dma_idx); in mt76_dma_alloc_queue()
112 writel(q->ndesc, &q->regs->ring_size); in mt76_dma_alloc_queue()
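The excerpts above come from mt76_dma_alloc_queue(): the descriptor ring is sized from n_desc and allocated as coherent DMA memory, every descriptor is pre-marked DMA-done so the ring starts out empty, and the base address, ring size and both index registers are programmed. As a rough illustration only, here is a minimal userspace model of that setup: plain calloc() stands in for dmam_alloc_coherent()/devm_kzalloc(), a plain struct stands in for the memory-mapped registers, the endianness handling (cpu_to_le32) and the done-bit value are placeholders, and all toy_* names are invented for this sketch.

    #include <stdint.h>
    #include <stdlib.h>

    #define DMA_DONE_BIT (1u << 31)     /* placeholder for MT_DMA_CTL_DMA_DONE */

    struct toy_desc  { uint32_t buf0, ctrl, buf1, info; };
    struct toy_entry { void *buf; };

    /* Stand-in for the memory-mapped ring registers (ring_size, cpu_idx,
     * dma_idx); the real desc_base register is skipped since the toy ring
     * is just a pointer. */
    struct toy_regs  { uint32_t ring_size, cpu_idx, dma_idx; };

    struct toy_queue {
        struct toy_desc  *desc;     /* descriptor ring ("coherent DMA" memory) */
        struct toy_entry *entry;    /* per-slot software bookkeeping */
        struct toy_regs   regs;     /* fake hardware registers */
        int ndesc, head, tail, queued;
    };

    /* Allocation pattern from mt76_dma_alloc_queue(): size both arrays from
     * n_desc, mark every descriptor "done" so the ring starts out empty, then
     * publish the ring size and zeroed indices to the (fake) hardware. */
    static int toy_queue_alloc(struct toy_queue *q, int n_desc)
    {
        q->ndesc = n_desc;
        q->desc  = calloc(n_desc, sizeof(*q->desc));
        q->entry = calloc(n_desc, sizeof(*q->entry));
        if (!q->desc || !q->entry)
            return -1;
        for (int i = 0; i < n_desc; i++)
            q->desc[i].ctrl = DMA_DONE_BIT;
        q->regs.ring_size = (uint32_t)n_desc;
        q->regs.cpu_idx = q->regs.dma_idx = 0;
        q->head = q->tail = q->queued = 0;
        return 0;
    }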
118 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_buf() argument
128 q->entry[q->head].txwi = DMA_DUMMY_DATA; in mt76_dma_add_buf()
129 q->entry[q->head].skip_buf0 = true; in mt76_dma_add_buf()
135 idx = q->head; in mt76_dma_add_buf()
136 q->head = (q->head + 1) % q->ndesc; in mt76_dma_add_buf()
138 desc = &q->desc[idx]; in mt76_dma_add_buf()
139 entry = &q->entry[idx]; in mt76_dma_add_buf()
168 q->queued++; in mt76_dma_add_buf()
171 q->entry[idx].txwi = txwi; in mt76_dma_add_buf()
172 q->entry[idx].skb = skb; in mt76_dma_add_buf()
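mt76_dma_add_buf() is the producer step: it fills the descriptor at q->head, advances head modulo ndesc, records the txwi/skb pointers in the matching entry and bumps q->queued (the free-space check itself lives in the callers, as the later excerpts show). A toy version of just the index arithmetic, extending the sketch above:

    /* Claim the slot at head and account for it (cf. mt76_dma_add_buf()). */
    static int toy_queue_add(struct toy_queue *q, void *buf)
    {
        if (q->queued >= q->ndesc - 1)   /* simplification: callers do this check */
            return -1;

        int idx = q->head;
        q->head = (q->head + 1) % q->ndesc;

        q->entry[idx].buf = buf;
        q->desc[idx].ctrl = 0;           /* done bit cleared: slot handed to "hw" */
        q->queued++;
        return idx;
    }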
178 mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_tx_cleanup_idx() argument
181 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_tx_cleanup_idx()
202 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_sync_idx() argument
204 writel(q->desc_dma, &q->regs->desc_base); in mt76_dma_sync_idx()
205 writel(q->ndesc, &q->regs->ring_size); in mt76_dma_sync_idx()
206 q->head = readl(&q->regs->dma_idx); in mt76_dma_sync_idx()
207 q->tail = q->head; in mt76_dma_sync_idx()
211 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_kick_queue() argument
214 writel(q->head, &q->regs->cpu_idx); in mt76_dma_kick_queue()
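mt76_dma_sync_idx() and mt76_dma_kick_queue() are easiest to read together with the two index registers: cpu_idx is the pointer software publishes (kicking a queue writes q->head there so the hardware starts fetching new descriptors), while dma_idx is owned by the hardware and reports how far DMA has progressed; syncing re-programs base and size and resets both software indices to the hardware's view. In the toy model, loosely:

    /* Publish the software head to the fake cpu_idx register
     * (cf. mt76_dma_kick_queue()). */
    static void toy_queue_kick(struct toy_queue *q)
    {
        q->regs.cpu_idx = (uint32_t)q->head;
    }

    /* Re-read the hardware index and restart from there
     * (cf. mt76_dma_sync_idx()). */
    static void toy_queue_sync(struct toy_queue *q)
    {
        q->regs.ring_size = (uint32_t)q->ndesc;
        q->head = q->tail = (int)q->regs.dma_idx;
    }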
220 struct mt76_queue *q = dev->q_tx[qid]; in mt76_dma_tx_cleanup() local
225 if (!q) in mt76_dma_tx_cleanup()
231 last = readl(&q->regs->dma_idx); in mt76_dma_tx_cleanup()
233 while (q->queued > 0 && q->tail != last) { in mt76_dma_tx_cleanup()
234 mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); in mt76_dma_tx_cleanup()
235 mt76_queue_tx_complete(dev, q, &entry); in mt76_dma_tx_cleanup()
243 if (!flush && q->tail == last) in mt76_dma_tx_cleanup()
244 last = readl(&q->regs->dma_idx); in mt76_dma_tx_cleanup()
249 spin_lock_bh(&q->lock); in mt76_dma_tx_cleanup()
250 mt76_dma_sync_idx(dev, q); in mt76_dma_tx_cleanup()
251 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_cleanup()
252 spin_unlock_bh(&q->lock); in mt76_dma_tx_cleanup()
255 wake = wake && q->stopped && in mt76_dma_tx_cleanup()
256 qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; in mt76_dma_tx_cleanup()
258 q->stopped = false; in mt76_dma_tx_cleanup()
260 if (!q->queued) in mt76_dma_tx_cleanup()
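mt76_dma_tx_cleanup() is the matching consumer for the TX rings: it reads dma_idx once, reaps completed entries in tail order, re-reads dma_idx whenever it catches up (the hardware may have finished more in the meantime), and afterwards wakes stopped mac80211 queues once enough slots are free again. A toy rendition, without locking, skb completion or the wake logic:

    /* Reap everything the hardware reports finished (cf. mt76_dma_tx_cleanup()). */
    static void toy_tx_cleanup(struct toy_queue *q, int flush)
    {
        int last = flush ? -1 : (int)q->regs.dma_idx;

        while (q->queued > 0 && q->tail != last) {
            q->entry[q->tail].buf = NULL;        /* "complete" the buffer */
            q->tail = (q->tail + 1) % q->ndesc;
            q->queued--;

            /* caught up with the snapshot: see whether hw moved on meanwhile */
            if (!flush && q->tail == last)
                last = (int)q->regs.dma_idx;
        }

        if (flush) {                             /* done under q->lock in the driver */
            toy_queue_sync(q);
            toy_queue_kick(q);
        }
    }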
268 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_get_buf() argument
271 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_get_buf()
272 struct mt76_desc *desc = &q->desc[idx]; in mt76_dma_get_buf()
275 int buf_len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_get_buf()
294 mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, in mt76_dma_dequeue() argument
297 int idx = q->tail; in mt76_dma_dequeue()
300 if (!q->queued) in mt76_dma_dequeue()
304 q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_dequeue()
305 else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) in mt76_dma_dequeue()
308 q->tail = (q->tail + 1) % q->ndesc; in mt76_dma_dequeue()
309 q->queued--; in mt76_dma_dequeue()
311 return mt76_dma_get_buf(dev, q, idx, len, info, more); in mt76_dma_dequeue()
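mt76_dma_dequeue() is the generic consumer used on the RX side: the slot at q->tail is only taken once the hardware has set the DMA-done bit back in its ctrl word, except on a flush, where the bit is forced so every outstanding slot drains; tail then advances modulo ndesc and queued drops. A toy version returning the buffer pointer instead of unmapping and handing back an skb:

    /* Pop the slot at tail once the hardware is done with it
     * (cf. mt76_dma_dequeue()). */
    static void *toy_queue_dequeue(struct toy_queue *q, int flush)
    {
        int idx = q->tail;

        if (!q->queued)
            return NULL;

        if (flush)
            q->desc[idx].ctrl |= DMA_DONE_BIT;      /* force-complete the slot */
        else if (!(q->desc[idx].ctrl & DMA_DONE_BIT))
            return NULL;                            /* hardware still owns it */

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;
        return q->entry[idx].buf;
    }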
318 struct mt76_queue *q = dev->q_tx[qid]; in mt76_dma_tx_queue_skb_raw() local
322 if (q->queued + 1 >= q->ndesc - 1) in mt76_dma_tx_queue_skb_raw()
333 spin_lock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
334 mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); in mt76_dma_tx_queue_skb_raw()
335 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_queue_skb_raw()
336 spin_unlock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
350 struct mt76_queue *q = dev->q_tx[qid]; in mt76_dma_tx_queue_skb() local
406 if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) { in mt76_dma_tx_queue_skb()
411 return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf, in mt76_dma_tx_queue_skb()
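A short note on the space check in mt76_dma_tx_queue_skb() above: each descriptor carries two buffer pointers (buf0/buf1), so a frame scattered over tx_info.nbuf buffers needs (nbuf + 1) / 2 descriptors; with nbuf = 5, for example, that rounds up to 3. The frame is rejected once queued plus that estimate would reach ndesc - 1, so the ring is never filled completely.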
434 mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_rx_fill() argument
439 int len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_fill()
440 int offset = q->buf_offset; in mt76_dma_rx_fill()
442 spin_lock_bh(&q->lock); in mt76_dma_rx_fill()
444 while (q->queued < q->ndesc - 1) { in mt76_dma_rx_fill()
447 buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC); in mt76_dma_rx_fill()
459 mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL); in mt76_dma_rx_fill()
464 mt76_dma_kick_queue(dev, q); in mt76_dma_rx_fill()
466 spin_unlock_bh(&q->lock); in mt76_dma_rx_fill()
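mt76_dma_rx_fill() keeps topping an RX ring up until only one slot remains unused, allocating each receive buffer from the queue's page-fragment cache (q->rx_page) and kicking the queue once at the end. A toy rendition with malloc() standing in for page_frag_alloc():

    /* Refill the RX ring until only one free slot remains
     * (cf. mt76_dma_rx_fill()). */
    static int toy_rx_fill(struct toy_queue *q, size_t buf_size)
    {
        int filled = 0;

        while (q->queued < q->ndesc - 1) {
            void *buf = malloc(buf_size);  /* driver: page_frag_alloc(&q->rx_page, ...) */
            if (!buf)
                break;
            if (toy_queue_add(q, buf) < 0) {
                free(buf);
                break;
            }
            filled++;
        }

        if (filled)
            toy_queue_kick(q);             /* tell the hardware about the new buffers */
        return filled;
    }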
472 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_rx_cleanup() argument
478 spin_lock_bh(&q->lock); in mt76_dma_rx_cleanup()
480 buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more); in mt76_dma_rx_cleanup()
486 spin_unlock_bh(&q->lock); in mt76_dma_rx_cleanup()
488 if (!q->rx_page.va) in mt76_dma_rx_cleanup()
491 page = virt_to_page(q->rx_page.va); in mt76_dma_rx_cleanup()
492 __page_frag_cache_drain(page, q->rx_page.pagecnt_bias); in mt76_dma_rx_cleanup()
493 memset(&q->rx_page, 0, sizeof(q->rx_page)); in mt76_dma_rx_cleanup()
499 struct mt76_queue *q = &dev->q_rx[qid]; in mt76_dma_rx_reset() local
502 for (i = 0; i < q->ndesc; i++) in mt76_dma_rx_reset()
503 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_rx_reset()
505 mt76_dma_rx_cleanup(dev, q); in mt76_dma_rx_reset()
506 mt76_dma_sync_idx(dev, q); in mt76_dma_rx_reset()
507 mt76_dma_rx_fill(dev, q); in mt76_dma_rx_reset()
509 if (!q->rx_head) in mt76_dma_rx_reset()
512 dev_kfree_skb(q->rx_head); in mt76_dma_rx_reset()
513 q->rx_head = NULL; in mt76_dma_rx_reset()
517 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, in mt76_add_fragment() argument
522 struct sk_buff *skb = q->rx_head; in mt76_add_fragment()
526 offset += q->buf_offset; in mt76_add_fragment()
528 q->buf_size); in mt76_add_fragment()
534 q->rx_head = NULL; in mt76_add_fragment()
535 dev->drv->rx_skb(dev, q - dev->q_rx, skb); in mt76_add_fragment()
539 mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) in mt76_dma_rx_process() argument
549 data = mt76_dma_dequeue(dev, q, false, &len, &info, &more); in mt76_dma_rx_process()
553 if (q->rx_head) in mt76_dma_rx_process()
554 data_len = q->buf_size; in mt76_dma_rx_process()
556 data_len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_process()
558 if (data_len < len + q->buf_offset) { in mt76_dma_rx_process()
559 dev_kfree_skb(q->rx_head); in mt76_dma_rx_process()
560 q->rx_head = NULL; in mt76_dma_rx_process()
566 if (q->rx_head) { in mt76_dma_rx_process()
567 mt76_add_fragment(dev, q, data, len, more); in mt76_dma_rx_process()
571 skb = build_skb(data, q->buf_size); in mt76_dma_rx_process()
576 skb_reserve(skb, q->buf_offset); in mt76_dma_rx_process()
578 if (q == &dev->q_rx[MT_RXQ_MCU]) { in mt76_dma_rx_process()
587 q->rx_head = skb; in mt76_dma_rx_process()
591 dev->drv->rx_skb(dev, q - dev->q_rx, skb); in mt76_dma_rx_process()
594 mt76_dma_rx_fill(dev, q); in mt76_dma_rx_process()
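mt76_dma_rx_process() ties the RX pieces together: dequeue up to budget completed buffers, turn each full frame into an skb with build_skb() (or buffer it in q->rx_head while more fragments are pending), hand it to dev->drv->rx_skb(), and finally refill the slots just consumed. A toy skeleton over the helpers above, with delivery reduced to a callback and fragment handling omitted:

    /* Skeleton of the RX poll loop (cf. mt76_dma_rx_process()). */
    static int toy_rx_process(struct toy_queue *q, int budget, size_t buf_size,
                              void (*deliver)(void *buf))
    {
        int done = 0;

        while (done < budget) {
            void *buf = toy_queue_dequeue(q, 0);
            if (!buf)
                break;              /* nothing more completed by the hardware */
            deliver(buf);           /* driver: build_skb() + dev->drv->rx_skb() */
            done++;
        }

        toy_rx_fill(q, buf_size);   /* put fresh buffers back into the ring */
        return done;
    }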