/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#define DMA_DUMMY_TXWI	((void *) ~0)

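/* Allocate the DMA descriptor ring and its shadow entry array for a queue,
 * initialize all descriptors with the DMA done bit set and program the ring
 * base, index and size registers.
 */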
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int size;
	int i;

	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->swq);

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	iowrite32(q->desc_dma, &q->regs->desc_base);
	iowrite32(0, &q->regs->cpu_idx);
	iowrite32(0, &q->regs->dma_idx);
	iowrite32(q->ndesc, &q->regs->ring_size);

	return 0;
}

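/* Fill hardware descriptors, packing up to two buffers into each one, and
 * advance the queue head. The skb and txwi pointers are attached to the
 * shadow entry of the last descriptor written, whose index is returned.
 */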
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi)
		q->entry[q->head].txwi = DMA_DUMMY_TXWI;

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;

	return idx;
}

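/* Unmap the buffers referenced by a completed TX descriptor, copy its queue
 * entry to the caller and clear the entry for reuse.
 */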
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
	u32 ctrl = le32_to_cpu(__ctrl);

	if (!e->txwi || !e->skb) {
		__le32 addr = READ_ONCE(q->desc[idx].buf0);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
		__le32 addr = READ_ONCE(q->desc[idx].buf1);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (e->txwi == DMA_DUMMY_TXWI)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

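/* Resync the software head/tail with the hardware DMA index and acknowledge
 * it through the CPU index register.
 */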
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	q->head = ioread32(&q->regs->dma_idx);
	q->tail = q->head;
	iowrite32(q->head, &q->regs->cpu_idx);
}

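/* Reclaim completed TX descriptors up to the hardware DMA index (all of them
 * when flushing), complete their skbs, return txwi buffers to the cache and
 * wake the mac80211 queue once enough room is available again.
 */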
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
	struct mt76_queue *q = &dev->q_tx[qid];
	struct mt76_queue_entry entry;
	bool wake = false;
	int last;

	if (!q->ndesc)
		return;

	spin_lock_bh(&q->lock);
	if (flush)
		last = -1;
	else
		last = ioread32(&q->regs->dma_idx);

	while (q->queued && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		if (entry.schedule)
			q->swq_queued--;

		if (entry.skb)
			dev->drv->tx_complete_skb(dev, q, &entry, flush);

		if (entry.txwi) {
			mt76_put_txwi(dev, entry.txwi);
			wake = true;
		}

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		if (!flush && q->tail == last)
			last = ioread32(&q->regs->dma_idx);
	}

	if (!flush)
		mt76_txq_schedule(dev, q);
	else
		mt76_dma_sync_idx(dev, q);

	wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;

	if (!q->queued)
		wake_up(&dev->tx_wait);

	spin_unlock_bh(&q->lock);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}

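/* Detach and unmap the buffer of an RX descriptor, optionally reporting its
 * length, info word and whether more fragments of the frame follow.
 */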
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}

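/* Pop the next buffer from the tail of an RX queue. Unless flushing, stop at
 * the first descriptor the hardware has not marked as done yet.
 */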
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

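/* Notify the hardware of newly filled descriptors by writing the CPU index. */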
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	iowrite32(q->head, &q->regs->cpu_idx);
}

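/* Prepare a txwi for the skb, map the skb head and its fragments, and add the
 * resulting buffer list to a TX queue. On failure the DMA mappings are
 * released and the skb is handed to the driver's tx_complete_skb callback.
 */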
int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta)
{
	struct mt76_queue_entry e;
	struct mt76_txwi_cache *t;
	struct mt76_queue_buf buf[32];
	struct sk_buff *iter;
	dma_addr_t addr;
	int len;
	u32 tx_info = 0;
	int n, ret;

	t = mt76_get_txwi(dev);
	if (!t) {
		ieee80211_free_txskb(dev->hw, skb);
		return -ENOMEM;
	}

	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
				       &tx_info);
	dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto free;

	len = skb->len - skb->data_len;
	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, addr)) {
		ret = -ENOMEM;
		goto free;
	}

	n = 0;
	buf[n].addr = t->dma_addr;
	buf[n++].len = dev->drv->txwi_size;
	buf[n].addr = addr;
	buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(buf))
			goto unmap;

		addr = dma_map_single(dev->dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(dev->dev, addr))
			goto unmap;

		buf[n].addr = addr;
		buf[n++].len = iter->len;
	}

	if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
		goto unmap;

	return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);

unmap:
	ret = -ENOMEM;
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
				 DMA_TO_DEVICE);

free:
	e.skb = skb;
	e.txwi = t;
	dev->drv->tx_complete_skb(dev, q, &e, true);
	mt76_put_txwi(dev, t);
	return ret;
}
EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);

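/* Refill an RX queue with page-fragment buffers until it is full, using
 * napi_alloc_frag from the NAPI poll path and netdev_alloc_frag otherwise.
 */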
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;
	int idx;
	void *(*alloc)(unsigned int fragsz);

	if (napi)
		alloc = napi_alloc_frag;
	else
		alloc = netdev_alloc_frag;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = alloc(q->buf_size);
		if (!buf)
			break;

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev->dev, addr)) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

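/* Drain an RX queue and free all buffers still attached to it. */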
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);
}

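/* Reset an RX queue: clear the done bits, drop all pending buffers,
 * resynchronize the ring indices and refill the queue.
 */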
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q, false);
}

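/* Append an RX fragment to the skb currently being reassembled and pass the
 * completed skb to the driver once the last fragment has arrived.
 */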
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct page *page = virt_to_head_page(data);
	int offset = data - page_address(page);
	struct sk_buff *skb = q->rx_head;

	offset += q->buf_offset;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			q->buf_size);

	if (more)
		return;

	q->rx_head = NULL;
	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}

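/* RX processing loop: build skbs from completed descriptors, reassemble
 * multi-fragment frames, hand them to the driver and refill the ring.
 */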
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	struct sk_buff *skb;
	unsigned char *data;
	int len;
	int done = 0;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}

		skb_reserve(skb, q->buf_offset);
		if (skb->tail + len > skb->end) {
			dev_kfree_skb(skb);
			continue;
		}

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *) skb->cb;
			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}

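/* NAPI poll handler: process up to budget frames for one RX queue and
 * complete NAPI when the budget was not exhausted.
 */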
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget) {
		napi_complete(napi);
		dev->drv->rx_poll_complete(dev, qid);
	}

	return done;
}

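/* Set up a dummy netdev and NAPI contexts for the RX queues, perform the
 * initial buffer fill and enable polling.
 */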
static int
mt76_dma_init(struct mt76_dev *dev)
{
	int i;

	init_dummy_netdev(&dev->napi_dev);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
			       64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
		skb_queue_head_init(&dev->rx_skb[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.add_buf = mt76_dma_add_buf,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

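/* Install the DMA queue operations on the device. */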
int mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

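/* Flush all TX queues and tear down the RX queues and their NAPI contexts. */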
void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
		mt76_dma_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);