/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76x2.h"
#include "mt76x2_dma.h"

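/*
 * mt76x2_tx_queue_mcu - queue one MCU command message for transmission
 *
 * Encodes @cmd, @seq and the payload length into the descriptor info word,
 * DMA-maps the payload in @skb and queues it on the TX ring selected by
 * @qid as a single buffer, then kicks the hardware.
 *
 * Illustrative caller (hypothetical sketch, not code from this file; the
 * msg_seq field is assumed to track MCU command sequence numbers):
 *
 *	seq = ++dev->mcu.msg_seq & 0xf;
 *	ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
 *
 * Returns 0 on success or -ENOMEM if the DMA mapping fails.
 */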
int
mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
		    struct sk_buff *skb, int cmd, int seq)
{
	struct mt76_queue *q = &dev->mt76.q_tx[qid];
	struct mt76_queue_buf buf;
	dma_addr_t addr;
	u32 tx_info;

	/* Encode command type, sequence number, destination port and
	 * payload length into the descriptor info word.
	 */
	tx_info = MT_MCU_MSG_TYPE_CMD |
		  FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
		  FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
		  FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
		  FIELD_PREP(MT_MCU_MSG_LEN, skb->len);

	addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (dma_mapping_error(dev->mt76.dev, addr))
		return -ENOMEM;

	buf.addr = addr;
	buf.len = skb->len;

	/* Add the message as a single-fragment buffer and kick the ring */
	spin_lock_bh(&q->lock);
	mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_queue_kick(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;
}

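/*
 * mt76x2_init_tx_queue - set up one hardware TX ring
 *
 * Points the queue at its per-ring register block, allocates the
 * descriptor ring and enables the TX-done interrupt for hardware
 * queue @idx.
 */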
static int
mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc)
{
	int ret;

	q->regs = dev->mt76.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->hw_idx = idx;

	ret = mt76_queue_alloc(dev, q);
	if (ret)
		return ret;

	mt76x2_irq_enable(dev, MT_INT_TX_DONE(idx));

	return 0;
}

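/*
 * mt76x2_init_rx_queue - set up one hardware RX ring
 *
 * Same pattern as TX ring setup, but for an RX ring: @bufsize is the
 * size of the receive buffers attached to each descriptor.
 */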
static int
mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize)
{
	int ret;

	q->regs = dev->mt76.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;

	ret = mt76_queue_alloc(dev, q);
	if (ret)
		return ret;

	mt76x2_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

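/*
 * mt76x2_tx_tasklet - TX completion bottom half
 *
 * Drains the TX status FIFO, reaps completed frames from all TX queues
 * (from MT_TXQ_MCU down to queue 0), polls for any remaining TX status
 * and re-enables the TX-done interrupts before returning.
 */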
static void
mt76x2_tx_tasklet(unsigned long data)
{
	struct mt76x2_dev *dev = (struct mt76x2_dev *) data;
	int i;

	mt76x2_mac_process_tx_status_fifo(dev);

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	mt76x2_mac_poll_tx_status(dev, false);
	mt76x2_irq_enable(dev, MT_INT_TX_DONE_ALL);
}

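/*
 * mt76x2_dma_init - allocate and wire up all DMA rings
 *
 * Sets up one TX ring per WMM access category plus the management and
 * MCU TX rings, the MCU and main RX rings, the TX-completion tasklet
 * and the MCU response queue. Intended to be called once from the
 * device initialization path.
 */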
int mt76x2_dma_init(struct mt76x2_dev *dev)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};
	int ret;
	int i;
	struct mt76_txwi_cache __maybe_unused *t;
	struct mt76_queue *q;

	/* Make sure the cached TXWI and the reserved RX headroom are
	 * large enough for this chip's descriptors.
	 */
	BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x2_txwi));
	BUILD_BUG_ON(sizeof(struct mt76x2_rxwi) > MT_RX_HEADROOM);

	mt76_dma_attach(&dev->mt76);

	init_waitqueue_head(&dev->mcu.wait);
	skb_queue_head_init(&dev->mcu.res_q);

	tasklet_init(&dev->tx_tasklet, mt76x2_tx_tasklet, (unsigned long) dev);

	/* Reset all DMA ring indices */
	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	/* One TX ring per WMM access category */
	for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
		ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[i],
					   wmm_queue_map[i], MT_TX_RING_SIZE);
		if (ret)
			return ret;
	}

	ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
				   MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
				   MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x2_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				   MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	/* Main RX ring: adjust the buffer offset to leave room for the
	 * RXWI header within the reserved headroom.
	 */
	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x2_rxwi);
	ret = mt76x2_init_rx_queue(dev, q, 0, MT76x2_RX_RING_SIZE,
				   MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	return mt76_init_queues(dev);
}

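/*
 * mt76x2_dma_cleanup - tear down DMA state
 *
 * Stops the TX-completion tasklet and frees all queue resources;
 * counterpart to mt76x2_dma_init().
 */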
void mt76x2_dma_cleanup(struct mt76x2_dev *dev)
{
	tasklet_kill(&dev->tx_tasklet);
	mt76_dma_cleanup(&dev->mt76);
}