/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76x2.h"

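/*
 * Disable MAC TX/RX and poll for up to 300 us until both paths report
 * idle. If @force is set and the MAC never becomes idle, toggle the BBP
 * CORE reset bits as a last resort. The RTS retry limit is cleared while
 * stopping and restored before returning.
 */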
void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
{
	bool stopped = false;
	u32 rts_cfg;
	int i;

	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);

	rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);

	/* Wait for MAC to become idle */
	for (i = 0; i < 300; i++) {
		if ((mt76_rr(dev, MT_MAC_STATUS) &
		     (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
		    mt76_rr(dev, MT_BBP(IBI, 12))) {
			udelay(1);
			continue;
		}

		stopped = true;
		break;
	}

	if (force && !stopped) {
		mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
		mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));

		mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
		mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
	}

	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
}
EXPORT_SYMBOL_GPL(mt76x2_mac_stop);

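/*
 * Read one entry from the hardware TX status FIFO into @stat.
 * Returns false if no valid status is pending.
 */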
bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
			       struct mt76x2_tx_status *stat)
{
	u32 stat1, stat2;

	stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
	stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);

	stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
	if (!stat->valid)
		return false;

	stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
	stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
	stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
	stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
	stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);

	stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
	stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);

	return true;
}
EXPORT_SYMBOL_GPL(mt76x2_mac_load_tx_status);

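/*
 * Convert a hardware rate word into an ieee80211_tx_rate entry, mapping
 * the PHY mode (CCK/OFDM/HT/VHT) and bandwidth bits to mac80211 flags.
 */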
static int
mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
			   enum nl80211_band band)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	txrate->idx = 0;
	txrate->flags = 0;
	txrate->count = 1;

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (band == NL80211_BAND_2GHZ)
			idx += 4;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8)
			idx -= 8;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
		/* fall through */
	case MT_PHY_TYPE_HT:
		txrate->flags |= IEEE80211_TX_RC_MCS;
		txrate->idx = idx;
		break;
	case MT_PHY_TYPE_VHT:
		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
		txrate->idx = idx;
		break;
	default:
		return -EINVAL;
	}

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case MT_PHY_BW_80:
		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_SGI)
		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;

	return 0;
}

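/*
 * Fill a mac80211 tx_info from a TX status report. The hardware only
 * reports the final rate and the total retry count, so the intermediate
 * rate entries are reconstructed assuming one attempt per fallback step.
 */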
static void
mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
			  struct ieee80211_tx_info *info,
			  struct mt76x2_tx_status *st, int n_frames)
{
	struct ieee80211_tx_rate *rate = info->status.rates;
	int cur_idx, last_rate;
	int i;

	if (!n_frames)
		return;

	last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
	mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
				   dev->mt76.chandef.chan->band);
	if (last_rate < IEEE80211_TX_MAX_RATES - 1)
		rate[last_rate + 1].idx = -1;

	cur_idx = rate[last_rate].idx + last_rate;
	for (i = 0; i <= last_rate; i++) {
		rate[i].flags = rate[last_rate].flags;
		rate[i].idx = max_t(int, 0, cur_idx - i);
		rate[i].count = 1;
	}
	rate[last_rate].count = st->retry + 1 - last_rate;

	info->status.ampdu_len = n_frames;
	info->status.ampdu_ack_len = st->success ? n_frames : 0;

	if (st->pktid & MT_TXWI_PKTID_PROBE)
		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;

	if (st->aggr)
		info->flags |= IEEE80211_TX_CTL_AMPDU |
			       IEEE80211_TX_STAT_AMPDU;

	if (!st->ack_req)
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else if (st->success)
		info->flags |= IEEE80211_TX_STAT_ACK;
}

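/*
 * Report a TX status event to mac80211. Consecutive A-MPDU reports with
 * the same WCID, rate and retry count are accumulated per station (up to
 * 32 frames) and flushed as a single aggregate status.
 */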
void mt76x2_send_tx_status(struct mt76x2_dev *dev,
			   struct mt76x2_tx_status *stat, u8 *update)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	struct mt76x2_sta *msta = NULL;

	rcu_read_lock();
	if (stat->wcid < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[stat->wcid]);

	if (wcid) {
		void *priv;

		priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
		sta = container_of(priv, struct ieee80211_sta,
				   drv_priv);
	}

	if (msta && stat->aggr) {
		u32 stat_val, stat_cache;

		stat_val = stat->rate;
		stat_val |= ((u32) stat->retry) << 16;
		stat_cache = msta->status.rate;
		stat_cache |= ((u32) msta->status.retry) << 16;

		if (*update == 0 && stat_val == stat_cache &&
		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
			msta->n_frames++;
			goto out;
		}

		mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
					  msta->n_frames);

		msta->status = *stat;
		msta->n_frames = 1;
		*update = 0;
	} else {
		mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
		*update = 1;
	}

	ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76x2_send_tx_status);

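/*
 * Copy the key material into a 32 byte buffer and map the mac80211
 * cipher suite to the corresponding hardware cipher type. Returns
 * MT_CIPHER_NONE for a missing, oversized or unsupported key.
 */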
static enum mt76x2_cipher_type
mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT_CIPHER_NONE;

	if (key->keylen > 32)
		return MT_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	default:
		return MT_CIPHER_NONE;
	}
}

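/*
 * Program a per-VIF shared (group) key: update the cipher mode field in
 * the shared key mode register and write the key material into the slot
 * selected by @vif_idx and @key_idx.
 */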
int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
				struct ieee80211_key_conf *key)
{
	enum mt76x2_cipher_type cipher;
	u8 key_data[32];
	u32 val;

	cipher = mt76x2_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);

	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
		     sizeof(key_data));

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x2_mac_shared_key_setup);

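/*
 * Program a per-WCID key: set the cipher mode and pairwise flag in the
 * WCID attributes, write the key material and initialize the IV with the
 * key index (plus the extended IV bit for TKIP/CCMP).
 */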
int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
			    struct ieee80211_key_conf *key)
{
	enum mt76x2_cipher_type cipher;
	u8 key_data[32];
	u8 iv_data[8];

	cipher = mt76x2_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));

	memset(iv_data, 0, sizeof(iv_data));
	if (key) {
		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
		iv_data[3] = key->keyidx << 6;
		if (cipher >= MT_CIPHER_TKIP)
			iv_data[3] |= 0x20;
	}

	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_key);

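/*
 * Encode a mac80211 TX rate into the hardware rate word used by the TXWI
 * and WCID rate registers, returning the number of spatial streams via
 * @nss_val. Legacy rates are looked up in the band's bitrate table, where
 * hw_value packs the PHY type and rate index.
 */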
static __le16
mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
		       const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
	u16 rateval;
	u8 phy, rate_idx;
	u8 nss = 1;
	u8 bw = 0;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 4);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mt76.chandef.chan->band;
		u16 val;

		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
		bw = 0;
	}

	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
		rateval |= MT_RXWI_RATE_SGI;

	*nss_val = nss;
	return cpu_to_le16(rateval);
}

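/*
 * Cache the rate-control selected TX rate for a WCID; the cached value is
 * used when building the TXWI for frames without an explicit rate.
 */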
void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
			      const struct ieee80211_tx_rate *rate)
{
	spin_lock_bh(&dev->mt76.lock);
	wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
	wcid->tx_rate_set = true;
	spin_unlock_bh(&dev->mt76.lock);
}
EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_rate);

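/*
 * Build the TXWI descriptor for a frame: WCID, rate, TX power adjustment,
 * software generated CCMP PN (when the WCID uses software IV), ACK and
 * sequence control, and A-MPDU parameters derived from the peer's HT
 * capabilities.
 */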
void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_sta *sta, int len)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
	u16 txwi_flags = 0;
	u8 nss;
	s8 txpwr_adj, max_txpwr_adj;
	u8 ccmp_pn[8];

	memset(txwi, 0, sizeof(*txwi));

	if (wcid)
		txwi->wcid = wcid->idx;
	else
		txwi->wcid = 0xff;

	txwi->pktid = 1;

	if (wcid && wcid->sw_iv && key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);
		ccmp_pn[0] = pn;
		ccmp_pn[1] = pn >> 8;
		ccmp_pn[2] = 0;
		ccmp_pn[3] = 0x20 | (key->keyidx << 6);
		ccmp_pn[4] = pn >> 16;
		ccmp_pn[5] = pn >> 24;
		ccmp_pn[6] = pn >> 32;
		ccmp_pn[7] = pn >> 40;
		txwi->iv = *((__le32 *)&ccmp_pn[0]);
		txwi->eiv = *((__le32 *)&ccmp_pn[1]);
	}

	spin_lock_bh(&dev->mt76.lock);
	if (wcid && (rate->idx < 0 || !rate->count)) {
		txwi->rate = wcid->tx_rate;
		max_txpwr_adj = wcid->max_txpwr_adj;
		nss = wcid->tx_rate_nss;
	} else {
		txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
		max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
	}
	spin_unlock_bh(&dev->mt76.lock);

	txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
					    max_txpwr_adj);
	txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);

	if (mt76xx_rev(dev) >= MT76XX_REV_E4)
		txwi->txstream = 0x13;
	else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
		 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
		txwi->txstream = 0x93;

	if (info->flags & IEEE80211_TX_CTL_LDPC)
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
		txwi_flags |= MT_TXWI_FLAGS_MMPS;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
		txwi->pktid |= MT_TXWI_PKTID_PROBE;
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;

		ba_size <<= sta->ht_cap.ampdu_factor;
		ba_size = min_t(int, 63, ba_size - 1);
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			ba_size = 0;
		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);

		txwi_flags |= MT_TXWI_FLAGS_AMPDU |
			      FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
					 sta->ht_cap.ampdu_density);
	}

	if (ieee80211_is_probe_resp(hdr->frame_control) ||
	    ieee80211_is_beacon(hdr->frame_control))
		txwi_flags |= MT_TXWI_FLAGS_TS;

	txwi->flags |= cpu_to_le16(txwi_flags);
	txwi->len_ctl = cpu_to_le16(len);
}
EXPORT_SYMBOL_GPL(mt76x2_mac_write_txwi);

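/*
 * Set or clear the per-WCID drop bit, skipping the register write when
 * the bit already has the requested value.
 */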
void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
{
	u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
	u32 bit = MT_WCID_DROP_MASK(idx);

	/* prevent unnecessary writes */
	if ((val & bit) != (bit * drop))
		mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
}
EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_drop);

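/*
 * Initialize a WCID entry: bind it to the given BSS index, clear its TX
 * rate registers and, for indices that have an address slot (idx < 128),
 * program the peer MAC address.
 */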
void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
{
	struct mt76_wcid_addr addr = {};
	u32 attr;

	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));

	mt76_wr(dev, MT_WCID_ATTR(idx), attr);

	mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
	mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);

	if (idx >= 128)
		return;

	if (mac)
		memcpy(addr.macaddr, mac, ETH_ALEN);

	mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
}
EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_setup);

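/*
 * Translate the RXWI rate word into mac80211 RX status fields: rate
 * index, encoding (legacy/HT/VHT), guard interval, STBC, LDPC and
 * bandwidth.
 */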
static int
mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (idx >= 8)
			idx = 0;

		if (status->band == NL80211_BAND_2GHZ)
			idx += 4;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8) {
			idx -= 8;
			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
		}

		if (idx >= 4)
			idx = 0;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		status->enc_flags |= RX_ENC_FLAG_HT_GF;
		/* fall through */
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		status->rate_idx = idx;
		break;
	case MT_PHY_TYPE_VHT:
		status->encoding = RX_ENC_VHT;
		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
		status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
		break;
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_LDPC)
		status->enc_flags |= RX_ENC_FLAG_LDPC;

	if (rate & MT_RXWI_RATE_SGI)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (rate & MT_RXWI_RATE_STBC)
		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		status->bw = RATE_INFO_BW_40;
		break;
	case MT_PHY_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	default:
		break;
	}

	return 0;
}

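/*
 * Strip @len bytes of padding inserted by the hardware between the
 * 802.11 header and the payload.
 */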
static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
{
	int hdrlen;

	if (!len)
		return;

	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	memmove(skb->data + len, skb->data, hdrlen);
	skb_pull(skb, len);
}

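/*
 * Apply the calibrated per-chain RSSI offset and LNA gain to a raw RSSI
 * reading from the RXWI.
 */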
int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
{
	struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;

	rssi += cal->rssi_offset[chain];
	rssi -= cal->lna_gain;

	return rssi;
}

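/*
 * Look up the station entry backing a hardware WCID index in the
 * RCU-protected WCID table.
 */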
static struct mt76x2_sta *
mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx)
{
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->wcid))
		return NULL;

	wcid = rcu_dereference(dev->wcid[idx]);
	if (!wcid)
		return NULL;

	return container_of(wcid, struct mt76x2_sta, wcid);
}

static struct mt76_wcid *
mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta,
		       bool unicast)
{
	if (!sta)
		return NULL;

	if (unicast)
		return &sta->wcid;
	else
		return &sta->vif->group_wcid;
}

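/*
 * Parse the RXWI for a received frame: strip L2 padding and the security
 * IV, record decryption state, per-chain signal, channel, TID/sequence
 * number and rate information in the mt76_rx_status.
 */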
int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
			  void *rxi)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct mt76x2_rxwi *rxwi = rxi;
	struct mt76x2_sta *sta;
	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
	u32 ctl = le32_to_cpu(rxwi->ctl);
	u16 rate = le16_to_cpu(rxwi->rate);
	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
	int pad_len = 0;
	u8 pn_len;
	u8 wcid;
	int len;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return -EINVAL;

	if (rxinfo & MT_RXINFO_L2PAD)
		pad_len += 2;

	if (rxinfo & MT_RXINFO_DECRYPT) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_MMIC_STRIPPED;
		status->flag |= RX_FLAG_MIC_STRIPPED;
		status->flag |= RX_FLAG_IV_STRIPPED;
	}

	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
	sta = mt76x2_rx_get_sta(dev, wcid);
	status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast);

	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
	if (pn_len) {
		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
		u8 *data = skb->data + offset;

		status->iv[0] = data[7];
		status->iv[1] = data[6];
		status->iv[2] = data[5];
		status->iv[3] = data[4];
		status->iv[4] = data[1];
		status->iv[5] = data[0];

		/*
		 * Driver CCMP validation can't deal with fragments.
		 * Let mac80211 take care of it.
		 */
		if (rxinfo & MT_RXINFO_FRAG) {
			status->flag &= ~RX_FLAG_IV_STRIPPED;
		} else {
			pad_len += pn_len << 2;
			len -= pn_len << 2;
		}
	}

	mt76x2_remove_hdr_pad(skb, pad_len);

	if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
		status->aggr = true;

	if (WARN_ON_ONCE(len > skb->len))
		return -EINVAL;

	pskb_trim(skb, len);
	status->chains = BIT(0) | BIT(1);
	status->chain_signal[0] = mt76x2_mac_get_rssi(dev, rxwi->rssi[0], 0);
	status->chain_signal[1] = mt76x2_mac_get_rssi(dev, rxwi->rssi[1], 1);
	status->signal = max(status->chain_signal[0], status->chain_signal[1]);
	status->freq = dev->mt76.chandef.chan->center_freq;
	status->band = dev->mt76.chandef.chan->band;

	status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
	status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);

	if (sta) {
		ewma_signal_add(&sta->rssi, status->signal);
		sta->inactive_count = 0;
	}

	return mt76x2_mac_process_rate(status, rate);
}
EXPORT_SYMBOL_GPL(mt76x2_mac_process_rx);