/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include "mt76x2.h"
#include "mt76x2_mcu.h"
#include "mt76x2_eeprom.h"
#include "mt76x2_trace.h"

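/*
 * Program the BSSID for the given APC slot: the low 32 bits of the
 * address go into the _L register, the remaining 16 bits into the _H
 * field. The hardware has 8 slots, so the index is masked to 3 bits.
 */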
void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
{
	idx &= 7;
	mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
	mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
		       get_unaligned_le16(addr + 4));
}

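/*
 * Drain TX status entries from the hardware. When called from the IRQ
 * path, entries are buffered in txstatus_fifo for later processing
 * (stopping once the fifo is full); otherwise each entry is reported
 * to mac80211 immediately.
 */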
void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
{
	struct mt76x2_tx_status stat = {};
	unsigned long flags;
	u8 update = 1;
	bool ret;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return;

	trace_mac_txstat_poll(dev);

	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
		spin_lock_irqsave(&dev->irq_lock, flags);
		ret = mt76x2_mac_load_tx_status(dev, &stat);
		spin_unlock_irqrestore(&dev->irq_lock, flags);

		if (!ret)
			break;

		trace_mac_txstat_fetch(dev, &stat);

		if (!irq) {
			mt76x2_send_tx_status(dev, &stat, &update);
			continue;
		}

		kfifo_put(&dev->txstatus_fifo, stat);
	}
}

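/*
 * Record completion info for a transmitted frame and hand the skb back.
 * Polling the TX status first picks up any pending status events for
 * this frame before it is completed.
 */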
static void
mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb,
			void *txwi_ptr)
{
	struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb);
	struct mt76x2_txwi *txwi = txwi_ptr;

	mt76x2_mac_poll_tx_status(dev, false);

	txi->tries = 0;
	txi->jiffies = jiffies;
	txi->wcid = txwi->wcid;
	txi->pktid = txwi->pktid;
	trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
	mt76x2_tx_complete(dev, skb);
}

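/*
 * Report all TX status entries that the IRQ path buffered in
 * txstatus_fifo, outside of the hard-IRQ handler itself.
 */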
void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev)
{
	struct mt76x2_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x2_send_tx_status(dev, &stat, &update);
}

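/*
 * DMA completion callback: frames that carry a TXWI go through the
 * txdone path, anything without one is simply freed.
 */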
void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
			    struct mt76_queue_entry *e, bool flush)
{
	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);

	if (e->txwi)
		mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
	else
		dev_kfree_skb_any(e->skb);
}

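/*
 * Copy a beacon frame, prefixed by its TXWI, into beacon memory at the
 * given offset. Fails if TXWI plus frame would exceed the per-slot size.
 */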
static int
mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
{
	int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
	struct mt76x2_txwi txwi;

	if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
		return -ENOSPC;

	mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);

	mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
	offset += sizeof(txwi);

	mt76_wr_copy(dev, offset, skb->data, skb->len);
	return 0;
}

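/*
 * Install (or clear, if skb is NULL) the beacon for one hardware slot.
 * The slot is masked out via MT_BCN_BYPASS_MASK while its memory is
 * being rewritten, so the MAC cannot transmit a half-updated beacon.
 */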
static int
__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
{
	int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
	int beacon_addr = dev->beacon_offsets[bcn_idx];
	int ret = 0;
	int i;

	/* Prevent corrupt transmissions during update */
	mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));

	if (skb) {
		ret = mt76_write_beacon(dev, beacon_addr, skb);
		if (!ret)
			dev->beacon_data_mask |= BIT(bcn_idx) &
						 dev->beacon_mask;
	} else {
		dev->beacon_data_mask &= ~BIT(bcn_idx);
		for (i = 0; i < beacon_len; i += 4)
			mt76_wr(dev, beacon_addr + i, 0);
	}

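	/* Bypass every slot without valid beacon data, un-bypass the rest */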
	mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);

	return ret;
}

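/*
 * Update the beacon for one interface. Beacons of all beaconing
 * interfaces are kept packed into the lowest hardware slots, so
 * gaining or losing a beacon shifts the slot assignment and forces
 * the following beacons to be rewritten; leftover slots are cleared.
 */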
int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
			  struct sk_buff *skb)
{
	bool force_update = false;
	int bcn_idx = 0;
	int i;

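	/*
	 * Walk all interfaces, assigning consecutive slots to those that
	 * have a beacon. If this interface gained or lost its beacon, all
	 * later beacons move and must be rewritten into their new slots.
	 */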
	for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
		if (vif_idx == i) {
			force_update = !!dev->beacons[i] ^ !!skb;

			if (dev->beacons[i])
				dev_kfree_skb(dev->beacons[i]);

			dev->beacons[i] = skb;
			__mt76x2_mac_set_beacon(dev, bcn_idx, skb);
		} else if (force_update && dev->beacons[i]) {
			__mt76x2_mac_set_beacon(dev, bcn_idx, dev->beacons[i]);
		}

		bcn_idx += !!dev->beacons[i];
	}

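	/* Clear any higher slots that still hold stale beacon data */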
	for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
		if (!(dev->beacon_data_mask & BIT(i)))
			break;

		__mt76x2_mac_set_beacon(dev, i, NULL);
	}

	mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
		       bcn_idx - 1);
	return 0;
}

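/*
 * Enable or disable beaconing for one interface. The pre-TBTT timer,
 * the beacon timers and the related interrupts are only toggled on the
 * first enable / last disable across all interfaces.
 */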
void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
{
	u8 old_mask = dev->beacon_mask;
	bool en;
	u32 reg;

	if (val) {
		dev->beacon_mask |= BIT(vif_idx);
	} else {
		dev->beacon_mask &= ~BIT(vif_idx);
		mt76x2_mac_set_beacon(dev, vif_idx, NULL);
	}

	if (!!old_mask == !!dev->beacon_mask)
		return;

	en = dev->beacon_mask;

	mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
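	/* "reg * en" is the full mask when enabling and 0 when disabling */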
	reg = MT_BEACON_TIME_CFG_BEACON_TX |
	      MT_BEACON_TIME_CFG_TBTT_EN |
	      MT_BEACON_TIME_CFG_TIMER_EN;
	mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);

	if (en)
		mt76x2_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
	else
		mt76x2_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}

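/*
 * Accumulate the hardware busy/idle counters into the per-channel
 * survey state; "active" time is derived as busy + idle.
 */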
void mt76x2_update_channel(struct mt76_dev *mdev)
{
	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
	struct mt76_channel_state *state;
	u32 active, busy;

	state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);

	busy = mt76_rr(dev, MT_CH_BUSY);
	active = busy + mt76_rr(dev, MT_CH_IDLE);

	spin_lock_bh(&dev->mt76.cc_lock);
	state->cc_busy += busy;
	state->cc_active += active;
	spin_unlock_bh(&dev->mt76.cc_lock);
}

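/*
 * Periodic maintenance work: refresh the channel survey data and
 * collect the aggregation statistics (two 16-bit counters packed into
 * each MT_TX_AGG_CNT register), then re-arm itself.
 */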
void mt76x2_mac_work(struct work_struct *work)
{
	struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev,
					      mac_work.work);
	int i, idx;

	mt76x2_update_channel(&dev->mt76);
	for (i = 0, idx = 0; i < 16; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->aggr_stats[idx++] += val & 0xffff;
		dev->aggr_stats[idx++] += val >> 16;
	}

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
				     MT_CALIBRATE_INTERVAL);
}

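/*
 * Apply an RTS threshold. A value of ~0 disables RTS-based protection;
 * any other value enables it and programs the threshold, mirrored into
 * the protection config registers of every PHY mode.
 */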
void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val)
{
	u32 data = 0;

	if (val != ~0)
		data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
		       MT_PROT_CFG_RTS_THRESH;

	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);

	mt76_rmw(dev, MT_CCK_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_OFDM_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_MM20_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_MM40_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_GF20_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_GF40_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_TX_PROT_CFG6,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_TX_PROT_CFG7,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_TX_PROT_CFG8,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
}