Lines Matching +full:ctrl +full:-len (drivers/net/wireless/mediatek/mt76/dma.c)

// SPDX-License-Identifier: ISC

#include <linux/dma-mapping.h>
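/*
 * The fragment below sets up one descriptor ring: it allocates the
 * coherent descriptor array and the per-descriptor entry bookkeeping,
 * marks every descriptor as DMA_DONE so the ring starts out empty, and
 * programs the ring base, indices and size into the queue registers.
 */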
/* mt76_dma_alloc_queue() */
	spin_lock_init(&q->lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	writel(q->desc_dma, &q->regs->desc_base);
	writel(0, &q->regs->cpu_idx);
	writel(0, &q->regs->dma_idx);
	writel(q->ndesc, &q->regs->ring_size);
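/*
 * The next fragment links buffers into descriptors: the per-buffer
 * lengths are packed into the ctrl word with FIELD_PREP(), the final
 * segment is flagged with MT_DMA_CTL_LAST_SEC0/1, q->head advances
 * modulo q->ndesc, and the skb/txwi pointers are stored in the entry
 * holding the last descriptor.
 */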
/* mt76_dma_add_buf() */
	u32 ctrl;
	int i, idx = -1;

	q->entry[q->head].txwi = DMA_DUMMY_DATA;
	q->entry[q->head].skip_buf0 = true;

	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
	if (i < nbufs - 1) {
		/* ... */
		ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
	}

	if (i == nbufs - 1)
		ctrl |= MT_DMA_CTL_LAST_SEC0;
	else if (i == nbufs - 2)
		ctrl |= MT_DMA_CTL_LAST_SEC1;

	idx = q->head;
	q->head = (q->head + 1) % q->ndesc;

	desc = &q->desc[idx];

	WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
	WRITE_ONCE(desc->info, cpu_to_le32(info));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

	q->queued++;

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
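/*
 * TX completion for a single descriptor: the ctrl word is read back to
 * recover the segment lengths, each mapped buffer is unmapped unless it
 * was skipped or flagged as the last section, and the dummy txwi/skb
 * markers are cleared.
 */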
/* mt76_dma_tx_cleanup_idx() */
	struct mt76_queue_entry *e = &q->entry[idx];
	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
	u32 ctrl = le32_to_cpu(__ctrl);

	if (!e->skip_buf0) {
		__le32 addr = READ_ONCE(q->desc[idx].buf0);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
		__le32 addr = READ_ONCE(q->desc[idx].buf1);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;
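/*
 * Resynchronizing the hardware view of a ring: the descriptor base and
 * ring size are rewritten, then the current dma_idx is mirrored into
 * head/tail and cpu_idx so software and hardware agree on the position.
 */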
/* mt76_dma_sync_idx() */
	writel(q->desc_dma, &q->regs->desc_base);
	writel(q->ndesc, &q->regs->ring_size);
	q->head = readl(&q->regs->dma_idx);
	q->tail = q->head;
	writel(q->head, &q->regs->cpu_idx);
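/*
 * The TX cleanup loop below reclaims completed descriptors up to the
 * hardware dma_idx (or everything when flushing), completes the
 * associated skbs, adjusts the queued counters under q->lock and, once
 * enough room is free again, restarts a previously stopped mac80211
 * queue.
 */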
/* mt76_dma_tx_cleanup() */
	struct mt76_sw_queue *sq = &dev->q_tx[qid];
	struct mt76_queue *q = sq->q;

	if (flush)
		last = -1;
	else
		last = readl(&q->regs->dma_idx);

	while ((q->queued > n_queued) && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		/* ... */
		q->tail = (q->tail + 1) % q->ndesc;
		/* ... */
		dev->drv->tx_complete_skb(dev, qid, &entry);

		if (!(dev->drv->txwi_flags & MT_TXWI_NO_FREE))
			mt76_put_txwi(dev, entry.txwi);

		if (!flush && q->tail == last)
			last = readl(&q->regs->dma_idx);
	}

	spin_lock_bh(&q->lock);

	q->queued -= n_queued;
	/* ... */
	dev->q_tx[i].swq_queued -= n_swq_queued[i];

	wake = wake && q->stopped &&
	       qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
	/* ... */
	q->stopped = false;

	if (!q->queued)
		wake_up(&dev->tx_wait);

	spin_unlock_bh(&q->lock);
	/* ... */
	ieee80211_wake_queue(dev->hw, qid);
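/*
 * Retrieving an RX buffer: the DMA address is taken from buf0, the
 * segment length from SD_LEN0 in the ctrl word, and the buffer is
 * unmapped with DMA_FROM_DEVICE and detached from the queue entry
 * before being returned.
 */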
/* mt76_dma_get_buf(..., int *len, u32 *info, bool *more) */
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];

	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		/* ... */
	}

	*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;
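/*
 * Dequeue helper: unless flushing, a descriptor is only consumed once
 * the hardware has set MT_DMA_CTL_DMA_DONE in its ctrl word; the tail
 * index then advances and the buffer is extracted via mt76_dma_get_buf().
 */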
/* mt76_dma_dequeue(..., int *len, u32 *info, bool *more) */
	int idx = q->tail;

	if (!q->queued)
		return NULL;

	if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
/* mt76_dma_kick_queue() */
	writel(q->head, &q->regs->cpu_idx);
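/*
 * Raw TX path (typically used for MCU commands): the whole skb is
 * mapped as a single buffer, queued under q->lock and the hardware is
 * then kicked by updating cpu_idx.
 */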
/* mt76_dma_tx_queue_skb_raw() */
	struct mt76_queue *q = dev->q_tx[qid].q;

	addr = dma_map_single(dev->dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		return -ENOMEM;

	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	/* ... */
	spin_unlock_bh(&q->lock);
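/*
 * The full TX path below maps the txwi, the skb head and any fragments
 * into tx_info.buf[], lets the driver backend fill the txwi via
 * tx_prepare_skb() (with cache syncs around it), checks that enough
 * descriptors are free, and on failure unwinds the DMA mappings and
 * completes the skb.
 */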
/* mt76_dma_tx_queue_skb() */
	struct mt76_queue *q = dev->q_tx[qid].q;
	int len, n = 0, ret = -ENOMEM;

	if (!t) {
		ieee80211_free_txskb(dev->hw, skb);
		return -ENOMEM;
	}

	skb->prev = skb->next = NULL;
	if (dev->drv->tx_aligned4_skbs)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n++].len = len;

	addr = dma_map_single(dev->dev, iter->data, iter->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		goto unmap;

	tx_info.buf[n++].len = iter->len;

	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	/* ... */
unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
	dev->drv->tx_complete_skb(dev, qid, &e);
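/*
 * RX refill: buffers are carved out of a page_frag cache and mapped
 * with DMA_FROM_DEVICE until the ring is full (q->ndesc - 1 entries);
 * the usable length excludes the skb overhead and the configured
 * buffer offset.
 */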
/* mt76_dma_rx_fill() */
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr))) {
			/* ... */
		}

		qbuf.len = len - offset;
		/* ... */
	}

	spin_unlock_bh(&q->lock);
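/*
 * RX cleanup drains the ring under q->lock and then releases the
 * page_frag cache that backed the buffers.
 */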
/* mt76_dma_rx_cleanup() */
	spin_lock_bh(&q->lock);
	/* ... */
	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
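/*
 * RX reset clears the DMA_DONE bit in every descriptor so the ring can
 * be reused, and drops any partially reassembled frame left in
 * q->rx_head.
 */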
/* mt76_dma_rx_reset() */
	struct mt76_queue *q = &dev->q_rx[qid];

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	if (!q->rx_head)
		return;

	dev_kfree_skb(q->rx_head);
	q->rx_head = NULL;
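/*
 * Scatter-gather RX reassembly: an additional buffer is attached to the
 * skb accumulated in q->rx_head as a page fragment; once the last piece
 * arrives the completed skb is passed to the driver's rx_skb() hook.
 */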
/* mt76_add_fragment(..., int len, bool more) */
	int offset = data - page_address(page);
	struct sk_buff *skb = q->rx_head;

	offset += q->buf_offset;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			q->buf_size);

	q->rx_head = NULL;
	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
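/*
 * The RX processing loop dequeues completed buffers, validates the
 * reported length against the buffer size, either appends the data to a
 * pending q->rx_head skb or builds a new skb around the buffer, stashes
 * the RX firmware-control word for the MCU queue, and hands finished
 * frames to the driver's rx_skb() hook.
 */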
/* mt76_dma_rx_process() */
	int len, data_len, done = 0;

	data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);

	if (q->rx_head)
		data_len = q->buf_size;
	else
		data_len = SKB_WITH_OVERHEAD(q->buf_size);

	if (data_len < len + q->buf_offset) {
		dev_kfree_skb(q->rx_head);
		q->rx_head = NULL;
		/* ... */
	}

	if (q->rx_head) {
		mt76_add_fragment(dev, q, data, len, more);
		/* ... */
	}

	skb = build_skb(data, q->buf_size);
	skb_reserve(skb, q->buf_offset);

	if (q == &dev->q_rx[MT_RXQ_MCU]) {
		u32 *rxfce = (u32 *)skb->cb;
		/* ... */
	}

	__skb_put(skb, len);

	q->rx_head = skb;

	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
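/*
 * NAPI poll handler: the queue index is derived from the napi pointer
 * offset, mt76_dma_rx_process() runs with the remaining budget, and the
 * driver's rx_poll_complete() hook is invoked once polling completes,
 * typically to re-enable the RX interrupt.
 */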
/* mt76_dma_rx_poll() */
	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);

	dev->drv->rx_poll_complete(dev, qid);
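/*
 * Initialization registers one NAPI context per RX queue on a dummy
 * netdev, pre-fills each RX ring and enables NAPI polling.
 */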
/* mt76_dma_init() */
	init_dummy_netdev(&dev->napi_dev);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
			       64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		skb_queue_head_init(&dev->rx_skb[i]);
		napi_enable(&dev->napi[i]);
	}
/* mt76_dma_attach() */
	dev->queue_ops = &mt76_dma_ops;
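/*
 * Teardown flushes every TX queue, removes the NAPI contexts and frees
 * the RX buffers.
 */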
/* mt76_dma_cleanup() */
	netif_napi_del(&dev->tx_napi);
	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
		mt76_dma_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}