// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

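/* Queue register accessors: rings owned by WED (Wireless Ethernet Dispatch)
 * expose their registers through the WED device, so accesses have to go
 * through the mtk_wed helpers instead of plain readl()/writel() on the
 * memory-mapped queue registers.
 */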
#define Q_READ(_dev, _q, _field) ({					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed,	\
					       ((_q)->wed_regs +	\
					        _offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_dev, _q, _field, _val)	do {				\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write(&(_dev)->mmio.wed,		\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#else

#define Q_READ(_dev, _q, _field)	readl(&(_q)->regs->_field)
#define Q_WRITE(_dev, _q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

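/* Allocate a txwi buffer plus its mt76_txwi_cache tracking structure in a
 * single allocation and map the txwi area for device DMA. The tracking
 * structure lives right behind the txwi data; released entries are recycled
 * through dev->txwi_cache instead of being freed immediately.
 */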
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->ptr = NULL;
	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->wed_lock);
	if (!list_empty(&dev->rxwi_cache)) {
		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->wed_lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

	if (t)
		return t;

	return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->wed_lock);
	list_add(&t->list, &dev->rxwi_cache);
	spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_rxwi(dev)) != NULL) {
		if (t->ptr)
			mt76_put_page_pool_buf(t->ptr, false);
		kfree(t);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(dev, q, desc_base, q->desc_dma);
	Q_WRITE(dev, q, ring_size, q->ndesc);
	q->head = Q_READ(dev, q, dma_idx);
	q->tail = q->head;
}

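/* Mark every descriptor as completed, zero the cpu/dma indices and
 * resynchronize the software head/tail with the hardware state.
 */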
static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	if (!q || !q->ndesc)
		return;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	Q_WRITE(dev, q, cpu_idx, 0);
	Q_WRITE(dev, q, dma_idx, 0);
	mt76_dma_sync_idx(dev, q);
}

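/* Program one rx descriptor. For WED rx queues the buffer is first registered
 * in the rx token table and the token is stored in buf1, so the buffer can be
 * looked up again via mt76_rx_token_release() on completion.
 */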
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
		    struct mt76_queue_buf *buf, void *data)
{
	struct mt76_desc *desc = &q->desc[q->head];
	struct mt76_queue_entry *entry = &q->entry[q->head];
	struct mt76_txwi_cache *txwi = NULL;
	u32 buf1 = 0, ctrl;
	int idx = q->head;
	int rx_token;

	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);

	if (mt76_queue_is_wed_rx(q)) {
		txwi = mt76_get_rxwi(dev);
		if (!txwi)
			return -ENOMEM;

		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
		if (rx_token < 0) {
			mt76_put_rxwi(dev, txwi);
			return -ENOMEM;
		}

		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
		ctrl |= MT_DMA_CTL_TO_HOST;
	}

	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
	WRITE_ONCE(desc->info, 0);

	entry->dma_addr[0] = buf->addr;
	entry->dma_len[0] = buf->len;
	entry->txwi = txwi;
	entry->buf = data;
	entry->wcid = 0xffff;
	entry->skip_buf1 = true;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

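/* Queue a tx buffer chain: each descriptor carries up to two buffers
 * (buf0/buf1), the last-segment bits are set on the final descriptor, and the
 * unmap information is mirrored in the software queue entries. The skb and
 * txwi are attached to the last entry so tx cleanup can complete the frame.
 */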
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	int i, idx = -1;
	u32 ctrl, next;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		next = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->head = next;
		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

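/* Publish the new ring head to the hardware. The write barrier makes sure
 * the descriptor updates are visible before cpu_idx is advanced.
 */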
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(dev, q, cpu_idx, q->head);
}

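/* Reclaim completed tx descriptors up to the hardware dma_idx, or the whole
 * ring when flushing. Completed txwi buffers are returned to the cache unless
 * the driver frees them itself (MT_DRV_TXWI_NO_FREE).
 */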
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(dev, q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(dev, q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

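/* Fetch the buffer attached to a completed rx descriptor. WED rx queues
 * resolve the buffer through the rx token stored in buf1; regular queues take
 * it from the software entry. In both cases the buffer is synced for CPU
 * access before it is returned.
 */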
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	void *buf;

	if (len) {
		u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	if (mt76_queue_is_wed_rx(q)) {
		u32 buf1 = le32_to_cpu(desc->buf1);
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
				SKB_WITH_OVERHEAD(q->buf_size),
				page_pool_get_dma_dir(q->page_pool));

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);

		if (drop) {
			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));

			*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
					   MT_DMA_CTL_DROP));

			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
		}
	} else {
		buf = e->buf;
		e->buf = NULL;
		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
				SKB_WITH_OVERHEAD(q->buf_size),
				page_pool_get_dma_dir(q->page_pool));
	}

	return buf;
}

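/* Pop the entry at the ring tail. Unless flushing, the descriptor is only
 * consumed once the hardware has set MT_DMA_CTL_DMA_DONE.
 */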
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		goto error;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

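/* Map the skb head and its fragments, let the driver fill the txwi via
 * tx_prepare_skb() and queue the resulting buffer chain. On failure the
 * mappings are undone and a tx status is reported back to mac80211 so the
 * skb is still accounted for.
 */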
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	if (test_bit(MT76_RESET, &dev->phy.state))
		goto free_skb;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	spin_lock_bh(&dev->rx_lock);
	ieee80211_tx_status_ext(hw, &status);
	spin_unlock_bh(&dev->rx_lock);

	return ret;
}

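/* Refill an rx ring with page pool buffers until only one slot is left free,
 * then kick the queue so the hardware sees the new buffers.
 */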
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		 bool allow_direct)
{
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int frames = 0;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		enum dma_data_direction dir;
		struct mt76_queue_buf qbuf;
		dma_addr_t addr;
		int offset;
		void *buf;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			break;

		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
		dir = page_pool_get_dma_dir(q->page_pool);
		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

		qbuf.addr = addr + q->buf_offset;
		qbuf.len = len - q->buf_offset;
		qbuf.skip_unmap = false;
		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
			mt76_put_page_pool_buf(buf, allow_direct);
			break;
		}
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

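/* Hand a queue over to the WED device: tx, rx and txfree rings are registered
 * with the WED driver and the queue register base is switched to the WED
 * mapping. Queues without the WED flag (or with WED inactive) are left as
 * plain MMIO queues.
 */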
int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	struct mtk_wed_device *wed = &dev->mmio.wed;
	int ret, type, ring;
	u8 flags;

	if (!q || !q->ndesc)
		return -EINVAL;

	flags = q->flags;
	if (!mtk_wed_device_active(wed))
		q->flags &= ~MT_QFLAG_WED;

	if (!(q->flags & MT_QFLAG_WED))
		return 0;

	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

	switch (type) {
	case MT76_WED_Q_TX:
		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset);
		if (!ret)
			q->wed_regs = wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q, false);
		q->flags = flags;

		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
		if (!ret)
			q->wed_regs = wed->txfree_ring.reg_base;
		break;
	case MT76_WED_Q_RX:
		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
		if (!ret)
			q->wed_regs = wed->rx_ring[ring].reg_base;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
#else
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);

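/* Allocate one DMA queue: the coherent descriptor ring, the software entry
 * array and a page pool, then run WED setup and reset the ring (the WED
 * txfree queue is already reset during WED setup).
 */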
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_create_page_pool(dev, q);
	if (ret)
		return ret;

	ret = mt76_dma_wed_setup(dev, q, false);
	if (ret)
		return ret;

	if (q->flags != MT_WED_Q_TXFREE)
		mt76_dma_queue_reset(dev, q);

	return 0;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	spin_lock_bh(&q->lock);

	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		if (!buf)
			break;

		mt76_put_page_pool_buf(buf, false);
	} while (1);

	if (q->rx_head) {
		dev_kfree_skb(q->rx_head);
		q->rx_head = NULL;
	}

	spin_unlock_bh(&q->lock);
}

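/* Reset an rx queue after a hardware reset: drop all pending buffers,
 * re-register the ring with WED and refill it (the txfree ring is refilled
 * as part of the WED setup itself).
 */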
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	if (!q->ndesc)
		return;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);

	/* reset WED rx queues */
	mt76_dma_wed_setup(dev, q, true);
	if (q->flags != MT_WED_Q_TXFREE) {
		mt76_dma_sync_idx(dev, q);
		mt76_dma_rx_fill(dev, q, false);
	}
}

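/* Append an rx buffer to the frame being reassembled in q->rx_head. Once the
 * last fragment arrives the completed skb is handed to the driver, unless the
 * fragment list overflowed, in which case the frame is dropped.
 */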
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		mt76_put_page_pool_buf(data, true);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}

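/* NAPI rx handler for one queue: dequeue completed buffers, build skbs
 * (reassembling multi-buffer frames via q->rx_head), pass them to the driver
 * and refill the ring. The WED txfree ring is paced by dma_idx instead of
 * the per-descriptor done bit.
 */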
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    q->flags == MT_WED_Q_TXFREE) {
		dma_idx = Q_READ(dev, q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(dev, q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more, info);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = napi_build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);
		skb_mark_for_recycle(skb);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
		mt76_put_page_pool_buf(data, true);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}

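/* NAPI poll callback shared by the rx queues: process up to the budget,
 * complete NAPI when the queue runs dry and re-enable rx interrupts through
 * the driver's rx_poll_complete hook.
 */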
int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

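/* Attach the rx queues to the dummy NAPI netdev, fill them with buffers and
 * enable NAPI processing.
 */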
static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;
	init_completion(&dev->mmio.wed_reset);
	init_completion(&dev->mmio.wed_reset_complete);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];

		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, q);

		page_pool_destroy(q->page_pool);
	}

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);