// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

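/* Allocate one descriptor ring and program its base address, size and
 * indices into the per-queue hardware registers.
 */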
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int size;
	int i;

	spin_lock_init(&q->lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	writel(q->desc_dma, &q->regs->desc_base);
	writel(0, &q->regs->cpu_idx);
	writel(0, &q->regs->dma_idx);
	writel(q->ndesc, &q->regs->ring_size);

	return 0;
}

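/* Write up to two scatter buffers into each descriptor and advance the
 * queue head. Returns the index of the last descriptor used.
 */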
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;

	return idx;
}

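/* Unmap the DMA buffers of a completed TX descriptor and hand the
 * associated queue entry back to the caller.
 */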
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
	u32 ctrl = le32_to_cpu(__ctrl);

	if (!e->skip_buf0) {
		__le32 addr = READ_ONCE(q->desc[idx].buf0);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
		__le32 addr = READ_ONCE(q->desc[idx].buf1);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

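/* Resynchronize the software head/tail pointers with the hardware DMA
 * index after a flush or reset.
 */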
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	writel(q->desc_dma, &q->regs->desc_base);
	writel(q->ndesc, &q->regs->ring_size);
	q->head = readl(&q->regs->dma_idx);
	q->tail = q->head;
	writel(q->head, &q->regs->cpu_idx);
}

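/* Reclaim completed TX descriptors up to the hardware DMA index (or the
 * whole ring when flushing), free their skbs/txwi entries and wake the
 * mac80211 queue if it was stopped and enough room has been freed.
 */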
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
	struct mt76_sw_queue *sq = &dev->q_tx[qid];
	struct mt76_queue *q = sq->q;
	struct mt76_queue_entry entry;
	unsigned int n_swq_queued[4] = {};
	unsigned int n_queued = 0;
	bool wake = false;
	int i, last;

	if (!q)
		return;

	if (flush)
		last = -1;
	else
		last = readl(&q->regs->dma_idx);

	while ((q->queued > n_queued) && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		if (entry.schedule)
			n_swq_queued[entry.qid]++;

		q->tail = (q->tail + 1) % q->ndesc;
		n_queued++;

		if (entry.skb)
			dev->drv->tx_complete_skb(dev, qid, &entry);

		if (entry.txwi) {
			if (!(dev->drv->txwi_flags & MT_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
			wake = !flush;
		}

		if (!flush && q->tail == last)
			last = readl(&q->regs->dma_idx);
	}

	spin_lock_bh(&q->lock);

	q->queued -= n_queued;
	for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
		if (!n_swq_queued[i])
			continue;

		dev->q_tx[i].swq_queued -= n_swq_queued[i];
	}

	if (flush)
		mt76_dma_sync_idx(dev, q);

	wake = wake && q->stopped &&
	       qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
	if (wake)
		q->stopped = false;

	if (!q->queued)
		wake_up(&dev->tx_wait);

	spin_unlock_bh(&q->lock);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}

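/* Detach the RX buffer from a descriptor, unmap it and report its length
 * and the per-descriptor info word to the caller.
 */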
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}

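/* Pop the next descriptor from the ring tail. Unless flushing, stop at
 * the first descriptor that the hardware has not yet marked as done.
 */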
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

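/* Notify the hardware of newly queued descriptors by updating the CPU
 * index register.
 */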
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	writel(q->head, &q->regs->cpu_idx);
}

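/* Queue a raw frame (typically an MCU command) on a TX ring without
 * attaching a txwi descriptor.
 */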
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_queue_buf buf;
	dma_addr_t addr;

	addr = dma_map_single(dev->dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		return -ENOMEM;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;
}

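/* Map a data frame for transmission: allocate a txwi cache entry, map the
 * skb head and all fragments, let the driver fill in the txwi, then add
 * the resulting buffer list to the TX ring.
 */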
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	int len, n = 0, ret = -ENOMEM;
	struct mt76_queue_entry e;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	t = mt76_get_txwi(dev);
	if (!t) {
		ieee80211_free_txskb(dev->hw, skb);
		return -ENOMEM;
	}
	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->tx_aligned4_skbs)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
	e.skb = tx_info.skb;
	e.txwi = t;
	dev->drv->tx_complete_skb(dev, qid, &e);
	mt76_put_txwi(dev, t);
	return ret;
}

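/* Refill an RX ring with freshly allocated, DMA-mapped page fragments and
 * kick the queue if any buffers were added.
 */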
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;
	int idx;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

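/* Drain all pending buffers from an RX ring and release the page fragment
 * cache used to allocate them.
 */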
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

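/* Reset an RX ring: clear the DMA-done bits, drop all queued buffers and
 * any partially reassembled frame, resync the hardware indices and refill
 * the ring.
 */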
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);

	if (!q->rx_head)
		return;

	dev_kfree_skb(q->rx_head);
	q->rx_head = NULL;
}

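/* Append an RX fragment to the skb currently being reassembled and pass
 * the completed frame to the driver once the last fragment has arrived.
 */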
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct page *page = virt_to_head_page(data);
	int offset = data - page_address(page);
	struct sk_buff *skb = q->rx_head;

	offset += q->buf_offset;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			q->buf_size);

	if (more)
		return;

	q->rx_head = NULL;
	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}

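/* Process up to @budget completed RX descriptors: build skbs around the
 * received buffers, handle multi-buffer frames and refill the ring when
 * done.
 */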
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0;
	struct sk_buff *skb;
	unsigned char *data;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;

			skb_free_frag(data);
			continue;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}
		skb_reserve(skb, q->buf_offset);

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *)skb->cb;
			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	}

	mt76_dma_rx_fill(dev, q);
	return done;
}

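/* NAPI poll handler for one RX queue. */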
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget) {
		napi_complete(napi);
		dev->drv->rx_poll_complete(dev, qid);
	}

	return done;
}

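/* Register the NAPI instances for all RX queues, fill the RX rings and
 * enable polling.
 */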
static int
mt76_dma_init(struct mt76_dev *dev)
{
	int i;

	init_dummy_netdev(&dev->napi_dev);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
			       64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		skb_queue_head_init(&dev->rx_skb[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	netif_napi_del(&dev->tx_napi);
	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
		mt76_dma_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);