1 /*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
4 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include "mac.h"
20
21 #include <net/mac80211.h>
22 #include <linux/etherdevice.h>
23 #include <linux/acpi.h>
24
25 #include "hif.h"
26 #include "core.h"
27 #include "debug.h"
28 #include "wmi.h"
29 #include "htt.h"
30 #include "txrx.h"
31 #include "testmode.h"
32 #include "wmi.h"
33 #include "wmi-tlv.h"
34 #include "wmi-ops.h"
35 #include "wow.h"
36
37 /*********/
38 /* Rates */
39 /*********/
40
41 static struct ieee80211_rate ath10k_rates[] = {
42 { .bitrate = 10,
43 .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
44 { .bitrate = 20,
45 .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
46 .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
47 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
48 { .bitrate = 55,
49 .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
50 .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
51 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
52 { .bitrate = 110,
53 .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
54 .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
55 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
56
57 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
58 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
59 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
60 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
61 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
62 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
63 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
64 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
65 };
66
67 static struct ieee80211_rate ath10k_rates_rev2[] = {
68 { .bitrate = 10,
69 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
70 { .bitrate = 20,
71 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
72 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
73 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
74 { .bitrate = 55,
75 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
76 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
77 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
78 { .bitrate = 110,
79 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
80 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
81 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
82
83 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
84 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
85 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
86 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
87 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
88 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
89 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
90 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
91 };
92
93 #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
94
95 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
96 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
97 ATH10K_MAC_FIRST_OFDM_RATE_IDX)
98 #define ath10k_g_rates (ath10k_rates + 0)
99 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
100
101 #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
102 #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
103
104 #define ath10k_wmi_legacy_rates ath10k_rates
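/* The rate tables and macros above follow mac80211 conventions: .bitrate is
 * in units of 100 kbps (so 10 == 1 Mbps, 540 == 54 Mbps), .hw_value is the
 * firmware rate code for long preamble and .hw_value_short the short-preamble
 * CCK variant. The first four entries are CCK, which is why the 5 GHz "a"
 * rate set starts at ATH10K_MAC_FIRST_OFDM_RATE_IDX (4) while the 2.4 GHz
 * "g" rate set uses the whole table.
 */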
105
106 static bool ath10k_mac_bitrate_is_cck(int bitrate)
107 {
108 switch (bitrate) {
109 case 10:
110 case 20:
111 case 55:
112 case 110:
113 return true;
114 }
115
116 return false;
117 }
118
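/* Convert a mac80211 bitrate (100 kbps units) to the rate value used when
 * talking to the firmware: the rate in 500 kbps units, with BIT(7) marking
 * CCK rates. For example, 5.5 Mbps is stored as 55, so the result is
 * DIV_ROUND_UP(55, 5) | BIT(7) = 11 | 0x80 = 0x8b, while 6 Mbps OFDM (60)
 * simply becomes 12.
 */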
119 static u8 ath10k_mac_bitrate_to_rate(int bitrate)
120 {
121 return DIV_ROUND_UP(bitrate, 5) |
122 (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
123 }
124
125 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
126 u8 hw_rate, bool cck)
127 {
128 const struct ieee80211_rate *rate;
129 int i;
130
131 for (i = 0; i < sband->n_bitrates; i++) {
132 rate = &sband->bitrates[i];
133
134 if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
135 continue;
136
137 if (rate->hw_value == hw_rate)
138 return i;
139 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
140 rate->hw_value_short == hw_rate)
141 return i;
142 }
143
144 return 0;
145 }
146
147 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
148 u32 bitrate)
149 {
150 int i;
151
152 for (i = 0; i < sband->n_bitrates; i++)
153 if (sband->bitrates[i].bitrate == bitrate)
154 return i;
155
156 return 0;
157 }
158
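/* mcs_map is the 16-bit VHT MCS map with 2 bits per spatial stream (nss is
 * the zero-based stream index here). The helper below expands that 2-bit
 * code into a bitmask of usable MCS indices, e.g. IEEE80211_VHT_MCS_SUPPORT_0_9
 * yields BIT(10) - 1 = 0x3ff (MCS 0-9). A stream marked "not supported"
 * falls through and yields 0.
 */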
159 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
160 {
161 switch ((mcs_map >> (2 * nss)) & 0x3) {
162 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
163 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
164 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
165 }
166 return 0;
167 }
168
169 static u32
170 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
171 {
172 int nss;
173
174 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
175 if (ht_mcs_mask[nss])
176 return nss + 1;
177
178 return 1;
179 }
180
181 static u32
182 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
183 {
184 int nss;
185
186 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
187 if (vht_mcs_mask[nss])
188 return nss + 1;
189
190 return 1;
191 }
192
193 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
194 {
195 enum wmi_host_platform_type platform_type;
196 int ret;
197
198 if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
199 platform_type = WMI_HOST_PLATFORM_LOW_PERF;
200 else
201 platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
202
203 ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
204
205 if (ret && ret != -EOPNOTSUPP) {
206 ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
207 return ret;
208 }
209
210 return 0;
211 }
212
213 /**********/
214 /* Crypto */
215 /**********/
216
217 static int ath10k_send_key(struct ath10k_vif *arvif,
218 struct ieee80211_key_conf *key,
219 enum set_key_cmd cmd,
220 const u8 *macaddr, u32 flags)
221 {
222 struct ath10k *ar = arvif->ar;
223 struct wmi_vdev_install_key_arg arg = {
224 .vdev_id = arvif->vdev_id,
225 .key_idx = key->keyidx,
226 .key_len = key->keylen,
227 .key_data = key->key,
228 .key_flags = flags,
229 .macaddr = macaddr,
230 };
231
232 lockdep_assert_held(&arvif->ar->conf_mutex);
233
234 switch (key->cipher) {
235 case WLAN_CIPHER_SUITE_CCMP:
236 arg.key_cipher = WMI_CIPHER_AES_CCM;
237 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
238 break;
239 case WLAN_CIPHER_SUITE_TKIP:
240 arg.key_cipher = WMI_CIPHER_TKIP;
241 arg.key_txmic_len = 8;
242 arg.key_rxmic_len = 8;
243 break;
244 case WLAN_CIPHER_SUITE_WEP40:
245 case WLAN_CIPHER_SUITE_WEP104:
246 arg.key_cipher = WMI_CIPHER_WEP;
247 break;
248 case WLAN_CIPHER_SUITE_CCMP_256:
249 arg.key_cipher = WMI_CIPHER_AES_CCM;
250 break;
251 case WLAN_CIPHER_SUITE_GCMP:
252 case WLAN_CIPHER_SUITE_GCMP_256:
253 arg.key_cipher = WMI_CIPHER_AES_GCM;
254 break;
255 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
256 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
257 case WLAN_CIPHER_SUITE_BIP_CMAC_256:
258 case WLAN_CIPHER_SUITE_AES_CMAC:
259 WARN_ON(1);
260 return -EINVAL;
261 default:
262 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
263 return -EOPNOTSUPP;
264 }
265
266 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
267 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
268
269 if (cmd == DISABLE_KEY) {
270 arg.key_cipher = WMI_CIPHER_NONE;
271 arg.key_data = NULL;
272 }
273
274 return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
275 }
276
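/* Install (or, for DISABLE_KEY, clear) a key via WMI and wait up to three
 * seconds for the firmware's install-key completion. If hardware crypto is
 * disabled for this vif (nohwcrypt) the function returns 1 without touching
 * firmware, a sentinel distinct from the negative error codes returned on
 * failure.
 */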
277 static int ath10k_install_key(struct ath10k_vif *arvif,
278 struct ieee80211_key_conf *key,
279 enum set_key_cmd cmd,
280 const u8 *macaddr, u32 flags)
281 {
282 struct ath10k *ar = arvif->ar;
283 int ret;
284 unsigned long time_left;
285
286 lockdep_assert_held(&ar->conf_mutex);
287
288 reinit_completion(&ar->install_key_done);
289
290 if (arvif->nohwcrypt)
291 return 1;
292
293 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
294 if (ret)
295 return ret;
296
297 time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
298 if (time_left == 0)
299 return -ETIMEDOUT;
300
301 return 0;
302 }
303
304 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
305 const u8 *addr)
306 {
307 struct ath10k *ar = arvif->ar;
308 struct ath10k_peer *peer;
309 int ret;
310 int i;
311 u32 flags;
312
313 lockdep_assert_held(&ar->conf_mutex);
314
315 if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
316 arvif->vif->type != NL80211_IFTYPE_ADHOC &&
317 arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
318 return -EINVAL;
319
320 spin_lock_bh(&ar->data_lock);
321 peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
322 spin_unlock_bh(&ar->data_lock);
323
324 if (!peer)
325 return -ENOENT;
326
327 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
328 if (arvif->wep_keys[i] == NULL)
329 continue;
330
331 switch (arvif->vif->type) {
332 case NL80211_IFTYPE_AP:
333 flags = WMI_KEY_PAIRWISE;
334
335 if (arvif->def_wep_key_idx == i)
336 flags |= WMI_KEY_TX_USAGE;
337
338 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
339 SET_KEY, addr, flags);
340 if (ret < 0)
341 return ret;
342 break;
343 case NL80211_IFTYPE_ADHOC:
344 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
345 SET_KEY, addr,
346 WMI_KEY_PAIRWISE);
347 if (ret < 0)
348 return ret;
349
350 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
351 SET_KEY, addr, WMI_KEY_GROUP);
352 if (ret < 0)
353 return ret;
354 break;
355 default:
356 WARN_ON(1);
357 return -EINVAL;
358 }
359
360 spin_lock_bh(&ar->data_lock);
361 peer->keys[i] = arvif->wep_keys[i];
362 spin_unlock_bh(&ar->data_lock);
363 }
364
365 /* In some cases (notably with static WEP IBSS with multiple keys)
366 * multicast Tx becomes broken. Both pairwise and groupwise keys are
367 * installed already. Using WMI_KEY_TX_USAGE in different combinations
368 * didn't seem to help. Using the def_keyid vdev parameter seems to be
369 * effective so use that.
370 *
371 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
372 */
373 if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
374 return 0;
375
376 if (arvif->def_wep_key_idx == -1)
377 return 0;
378
379 ret = ath10k_wmi_vdev_set_param(arvif->ar,
380 arvif->vdev_id,
381 arvif->ar->wmi.vdev_param->def_keyid,
382 arvif->def_wep_key_idx);
383 if (ret) {
384 ath10k_warn(ar, "failed to re-set def wpa key idxon vdev %i: %d\n",
385 arvif->vdev_id, ret);
386 return ret;
387 }
388
389 return 0;
390 }
391
392 static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
393 const u8 *addr)
394 {
395 struct ath10k *ar = arvif->ar;
396 struct ath10k_peer *peer;
397 int first_errno = 0;
398 int ret;
399 int i;
400 u32 flags = 0;
401
402 lockdep_assert_held(&ar->conf_mutex);
403
404 spin_lock_bh(&ar->data_lock);
405 peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
406 spin_unlock_bh(&ar->data_lock);
407
408 if (!peer)
409 return -ENOENT;
410
411 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
412 if (peer->keys[i] == NULL)
413 continue;
414
415 /* key flags are not required to delete the key */
416 ret = ath10k_install_key(arvif, peer->keys[i],
417 DISABLE_KEY, addr, flags);
418 if (ret < 0 && first_errno == 0)
419 first_errno = ret;
420
421 if (ret < 0)
422 ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
423 i, ret);
424
425 spin_lock_bh(&ar->data_lock);
426 peer->keys[i] = NULL;
427 spin_unlock_bh(&ar->data_lock);
428 }
429
430 return first_errno;
431 }
432
433 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
434 u8 keyidx)
435 {
436 struct ath10k_peer *peer;
437 int i;
438
439 lockdep_assert_held(&ar->data_lock);
440
441 /* We don't know which vdev this peer belongs to,
442 * since WMI doesn't give us that information.
443 *
444 * FIXME: multi-bss needs to be handled.
445 */
446 peer = ath10k_peer_find(ar, 0, addr);
447 if (!peer)
448 return false;
449
450 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
451 if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
452 return true;
453 }
454
455 return false;
456 }
457
458 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
459 struct ieee80211_key_conf *key)
460 {
461 struct ath10k *ar = arvif->ar;
462 struct ath10k_peer *peer;
463 u8 addr[ETH_ALEN];
464 int first_errno = 0;
465 int ret;
466 int i;
467 u32 flags = 0;
468
469 lockdep_assert_held(&ar->conf_mutex);
470
471 for (;;) {
472 /* since ath10k_install_key() sleeps we can't hold data_lock all the
473 * time, so we try to remove the keys incrementally
474 */
475 spin_lock_bh(&ar->data_lock);
476 i = 0;
477 list_for_each_entry(peer, &ar->peers, list) {
478 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
479 if (peer->keys[i] == key) {
480 ether_addr_copy(addr, peer->addr);
481 peer->keys[i] = NULL;
482 break;
483 }
484 }
485
486 if (i < ARRAY_SIZE(peer->keys))
487 break;
488 }
489 spin_unlock_bh(&ar->data_lock);
490
491 if (i == ARRAY_SIZE(peer->keys))
492 break;
493 /* key flags are not required to delete the key */
494 ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
495 if (ret < 0 && first_errno == 0)
496 first_errno = ret;
497
498 if (ret)
499 ath10k_warn(ar, "failed to remove key for %pM: %d\n",
500 addr, ret);
501 }
502
503 return first_errno;
504 }
505
506 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
507 struct ieee80211_key_conf *key)
508 {
509 struct ath10k *ar = arvif->ar;
510 struct ath10k_peer *peer;
511 int ret;
512
513 lockdep_assert_held(&ar->conf_mutex);
514
515 list_for_each_entry(peer, &ar->peers, list) {
516 if (ether_addr_equal(peer->addr, arvif->vif->addr))
517 continue;
518
519 if (ether_addr_equal(peer->addr, arvif->bssid))
520 continue;
521
522 if (peer->keys[key->keyidx] == key)
523 continue;
524
525 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n",
526 arvif->vdev_id, key->keyidx);
527
528 ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
529 if (ret) {
530 ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
531 arvif->vdev_id, peer->addr, ret);
532 return ret;
533 }
534 }
535
536 return 0;
537 }
538
539 /*********************/
540 /* General utilities */
541 /*********************/
542
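/* Map a cfg80211 channel definition to the firmware phymode, e.g. a 40 MHz
 * channel on 2.4 GHz becomes MODE_11NG_HT40 and an 80 MHz channel on 5 GHz
 * becomes MODE_11AC_VHT80. Width/band combinations the firmware has no mode
 * for (such as 5/10 MHz widths, or VHT widths on 2.4 GHz) map to
 * MODE_UNKNOWN and trip the WARN_ON below.
 */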
543 static inline enum wmi_phy_mode
544 chan_to_phymode(const struct cfg80211_chan_def *chandef)
545 {
546 enum wmi_phy_mode phymode = MODE_UNKNOWN;
547
548 switch (chandef->chan->band) {
549 case NL80211_BAND_2GHZ:
550 switch (chandef->width) {
551 case NL80211_CHAN_WIDTH_20_NOHT:
552 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
553 phymode = MODE_11B;
554 else
555 phymode = MODE_11G;
556 break;
557 case NL80211_CHAN_WIDTH_20:
558 phymode = MODE_11NG_HT20;
559 break;
560 case NL80211_CHAN_WIDTH_40:
561 phymode = MODE_11NG_HT40;
562 break;
563 case NL80211_CHAN_WIDTH_5:
564 case NL80211_CHAN_WIDTH_10:
565 case NL80211_CHAN_WIDTH_80:
566 case NL80211_CHAN_WIDTH_80P80:
567 case NL80211_CHAN_WIDTH_160:
568 phymode = MODE_UNKNOWN;
569 break;
570 }
571 break;
572 case NL80211_BAND_5GHZ:
573 switch (chandef->width) {
574 case NL80211_CHAN_WIDTH_20_NOHT:
575 phymode = MODE_11A;
576 break;
577 case NL80211_CHAN_WIDTH_20:
578 phymode = MODE_11NA_HT20;
579 break;
580 case NL80211_CHAN_WIDTH_40:
581 phymode = MODE_11NA_HT40;
582 break;
583 case NL80211_CHAN_WIDTH_80:
584 phymode = MODE_11AC_VHT80;
585 break;
586 case NL80211_CHAN_WIDTH_160:
587 phymode = MODE_11AC_VHT160;
588 break;
589 case NL80211_CHAN_WIDTH_80P80:
590 phymode = MODE_11AC_VHT80_80;
591 break;
592 case NL80211_CHAN_WIDTH_5:
593 case NL80211_CHAN_WIDTH_10:
594 phymode = MODE_UNKNOWN;
595 break;
596 }
597 break;
598 default:
599 break;
600 }
601
602 WARN_ON(phymode == MODE_UNKNOWN);
603 return phymode;
604 }
605
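/* Translate the 802.11n "minimum MPDU start spacing" code into whole
 * microseconds, e.g. code 5 (4 us) returns 4 while codes 1-3 (1/4, 1/2 and
 * 1 us) all return 1 due to the 1 us precision limit noted below.
 */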
606 static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
607 {
608 /*
609 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
610 * 0 for no restriction
611 * 1 for 1/4 us
612 * 2 for 1/2 us
613 * 3 for 1 us
614 * 4 for 2 us
615 * 5 for 4 us
616 * 6 for 8 us
617 * 7 for 16 us
618 */
619 switch (mpdudensity) {
620 case 0:
621 return 0;
622 case 1:
623 case 2:
624 case 3:
625 /* Our lower layer calculations limit our precision to
626 * 1 microsecond
627 */
628 return 1;
629 case 4:
630 return 2;
631 case 5:
632 return 4;
633 case 6:
634 return 8;
635 case 7:
636 return 16;
637 default:
638 return 0;
639 }
640 }
641
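/* Copy the vif's currently assigned channel definition under RCU. Returns
 * -ENOENT when no channel context is assigned (e.g. the vif is not started).
 */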
642 int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
643 struct cfg80211_chan_def *def)
644 {
645 struct ieee80211_chanctx_conf *conf;
646
647 rcu_read_lock();
648 conf = rcu_dereference(vif->chanctx_conf);
649 if (!conf) {
650 rcu_read_unlock();
651 return -ENOENT;
652 }
653
654 *def = conf->def;
655 rcu_read_unlock();
656
657 return 0;
658 }
659
660 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
661 struct ieee80211_chanctx_conf *conf,
662 void *data)
663 {
664 int *num = data;
665
666 (*num)++;
667 }
668
669 static int ath10k_mac_num_chanctxs(struct ath10k *ar)
670 {
671 int num = 0;
672
673 ieee80211_iter_chan_contexts_atomic(ar->hw,
674 ath10k_mac_num_chanctxs_iter,
675 &num);
676
677 return num;
678 }
679
680 static void
681 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
682 struct ieee80211_chanctx_conf *conf,
683 void *data)
684 {
685 struct cfg80211_chan_def **def = data;
686
687 *def = &conf->def;
688 }
689
690 static int ath10k_peer_create(struct ath10k *ar,
691 struct ieee80211_vif *vif,
692 struct ieee80211_sta *sta,
693 u32 vdev_id,
694 const u8 *addr,
695 enum wmi_peer_type peer_type)
696 {
697 struct ath10k_vif *arvif;
698 struct ath10k_peer *peer;
699 int num_peers = 0;
700 int ret;
701
702 lockdep_assert_held(&ar->conf_mutex);
703
704 num_peers = ar->num_peers;
705
706 /* Each vdev consumes a peer entry as well */
707 list_for_each_entry(arvif, &ar->arvifs, list)
708 num_peers++;
709
710 if (num_peers >= ar->max_num_peers)
711 return -ENOBUFS;
712
713 ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
714 if (ret) {
715 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
716 addr, vdev_id, ret);
717 return ret;
718 }
719
720 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
721 if (ret) {
722 ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
723 addr, vdev_id, ret);
724 return ret;
725 }
726
727 spin_lock_bh(&ar->data_lock);
728
729 peer = ath10k_peer_find(ar, vdev_id, addr);
730 if (!peer) {
731 spin_unlock_bh(&ar->data_lock);
732 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
733 addr, vdev_id);
734 ath10k_wmi_peer_delete(ar, vdev_id, addr);
735 return -ENOENT;
736 }
737
738 peer->vif = vif;
739 peer->sta = sta;
740
741 spin_unlock_bh(&ar->data_lock);
742
743 ar->num_peers++;
744
745 return 0;
746 }
747
748 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
749 {
750 struct ath10k *ar = arvif->ar;
751 u32 param;
752 int ret;
753
754 param = ar->wmi.pdev_param->sta_kickout_th;
755 ret = ath10k_wmi_pdev_set_param(ar, param,
756 ATH10K_KICKOUT_THRESHOLD);
757 if (ret) {
758 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
759 arvif->vdev_id, ret);
760 return ret;
761 }
762
763 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
764 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
765 ATH10K_KEEPALIVE_MIN_IDLE);
766 if (ret) {
767 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
768 arvif->vdev_id, ret);
769 return ret;
770 }
771
772 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
773 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
774 ATH10K_KEEPALIVE_MAX_IDLE);
775 if (ret) {
776 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
777 arvif->vdev_id, ret);
778 return ret;
779 }
780
781 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
782 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
783 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
784 if (ret) {
785 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
786 arvif->vdev_id, ret);
787 return ret;
788 }
789
790 return 0;
791 }
792
793 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
794 {
795 struct ath10k *ar = arvif->ar;
796 u32 vdev_param;
797
798 vdev_param = ar->wmi.vdev_param->rts_threshold;
799 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
800 }
801
802 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
803 {
804 int ret;
805
806 lockdep_assert_held(&ar->conf_mutex);
807
808 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
809 if (ret)
810 return ret;
811
812 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
813 if (ret)
814 return ret;
815
816 ar->num_peers--;
817
818 return 0;
819 }
820
821 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
822 {
823 struct ath10k_peer *peer, *tmp;
824 int peer_id;
825 int i;
826
827 lockdep_assert_held(&ar->conf_mutex);
828
829 spin_lock_bh(&ar->data_lock);
830 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
831 if (peer->vdev_id != vdev_id)
832 continue;
833
834 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
835 peer->addr, vdev_id);
836
837 for_each_set_bit(peer_id, peer->peer_ids,
838 ATH10K_MAX_NUM_PEER_IDS) {
839 ar->peer_map[peer_id] = NULL;
840 }
841
842 /* Double check that peer is properly un-referenced from
843 * the peer_map
844 */
845 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
846 if (ar->peer_map[i] == peer) {
847 ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
848 peer->addr, peer, i);
849 ar->peer_map[i] = NULL;
850 }
851 }
852
853 list_del(&peer->list);
854 kfree(peer);
855 ar->num_peers--;
856 }
857 spin_unlock_bh(&ar->data_lock);
858 }
859
860 static void ath10k_peer_cleanup_all(struct ath10k *ar)
861 {
862 struct ath10k_peer *peer, *tmp;
863 int i;
864
865 lockdep_assert_held(&ar->conf_mutex);
866
867 spin_lock_bh(&ar->data_lock);
868 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
869 list_del(&peer->list);
870 kfree(peer);
871 }
872
873 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
874 ar->peer_map[i] = NULL;
875
876 spin_unlock_bh(&ar->data_lock);
877
878 ar->num_peers = 0;
879 ar->num_stations = 0;
880 }
881
882 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
883 struct ieee80211_sta *sta,
884 enum wmi_tdls_peer_state state)
885 {
886 int ret;
887 struct wmi_tdls_peer_update_cmd_arg arg = {};
888 struct wmi_tdls_peer_capab_arg cap = {};
889 struct wmi_channel_arg chan_arg = {};
890
891 lockdep_assert_held(&ar->conf_mutex);
892
893 arg.vdev_id = vdev_id;
894 arg.peer_state = state;
895 ether_addr_copy(arg.addr, sta->addr);
896
897 cap.peer_max_sp = sta->max_sp;
898 cap.peer_uapsd_queues = sta->uapsd_queues;
899
900 if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
901 !sta->tdls_initiator)
902 cap.is_peer_responder = 1;
903
904 ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
905 if (ret) {
906 ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
907 arg.addr, vdev_id, ret);
908 return ret;
909 }
910
911 return 0;
912 }
913
914 /************************/
915 /* Interface management */
916 /************************/
917
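/* Release a pending beacon skb. Beacons are either sent from a preallocated
 * DMA-coherent buffer (beacon_buf) or mapped individually; only the latter
 * need dma_unmap_single() here. Callers must hold data_lock.
 */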
918 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
919 {
920 struct ath10k *ar = arvif->ar;
921
922 lockdep_assert_held(&ar->data_lock);
923
924 if (!arvif->beacon)
925 return;
926
927 if (!arvif->beacon_buf)
928 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
929 arvif->beacon->len, DMA_TO_DEVICE);
930
931 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
932 arvif->beacon_state != ATH10K_BEACON_SENT))
933 return;
934
935 dev_kfree_skb_any(arvif->beacon);
936
937 arvif->beacon = NULL;
938 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
939 }
940
941 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
942 {
943 struct ath10k *ar = arvif->ar;
944
945 lockdep_assert_held(&ar->data_lock);
946
947 ath10k_mac_vif_beacon_free(arvif);
948
949 if (arvif->beacon_buf) {
950 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
951 arvif->beacon_buf, arvif->beacon_paddr);
952 arvif->beacon_buf = NULL;
953 }
954 }
955
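/* Wait for the firmware's vdev start/restart/stop response. Callers are
 * expected to reinit_completion(&ar->vdev_setup_done) before issuing the
 * WMI command. Bails out early with -ESHUTDOWN while a firmware crash is
 * being handled.
 */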
956 static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
957 {
958 unsigned long time_left;
959
960 lockdep_assert_held(&ar->conf_mutex);
961
962 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
963 return -ESHUTDOWN;
964
965 time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
966 ATH10K_VDEV_SETUP_TIMEOUT_HZ);
967 if (time_left == 0)
968 return -ETIMEDOUT;
969
970 return 0;
971 }
972
973 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
974 {
975 struct cfg80211_chan_def *chandef = NULL;
976 struct ieee80211_channel *channel = NULL;
977 struct wmi_vdev_start_request_arg arg = {};
978 int ret = 0;
979
980 lockdep_assert_held(&ar->conf_mutex);
981
982 ieee80211_iter_chan_contexts_atomic(ar->hw,
983 ath10k_mac_get_any_chandef_iter,
984 &chandef);
985 if (WARN_ON_ONCE(!chandef))
986 return -ENOENT;
987
988 channel = chandef->chan;
989
990 arg.vdev_id = vdev_id;
991 arg.channel.freq = channel->center_freq;
992 arg.channel.band_center_freq1 = chandef->center_freq1;
993 arg.channel.band_center_freq2 = chandef->center_freq2;
994
995 /* TODO: set this up dynamically; what about the case where we
996 * don't have any vifs?
997 */
998 arg.channel.mode = chan_to_phymode(chandef);
999 arg.channel.chan_radar =
1000 !!(channel->flags & IEEE80211_CHAN_RADAR);
1001
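/* The tx power fields appear to be expressed in 0.5 dBm steps on the WMI
 * interface, hence the "* 2" applied to the dBm values mac80211 provides.
 * This is an assumption based on the scaling used throughout this file.
 */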
1002 arg.channel.min_power = 0;
1003 arg.channel.max_power = channel->max_power * 2;
1004 arg.channel.max_reg_power = channel->max_reg_power * 2;
1005 arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
1006
1007 reinit_completion(&ar->vdev_setup_done);
1008
1009 ret = ath10k_wmi_vdev_start(ar, &arg);
1010 if (ret) {
1011 ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
1012 vdev_id, ret);
1013 return ret;
1014 }
1015
1016 ret = ath10k_vdev_setup_sync(ar);
1017 if (ret) {
1018 ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
1019 vdev_id, ret);
1020 return ret;
1021 }
1022
1023 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
1024 if (ret) {
1025 ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
1026 vdev_id, ret);
1027 goto vdev_stop;
1028 }
1029
1030 ar->monitor_vdev_id = vdev_id;
1031
1032 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
1033 ar->monitor_vdev_id);
1034 return 0;
1035
1036 vdev_stop:
1037 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1038 if (ret)
1039 ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
1040 ar->monitor_vdev_id, ret);
1041
1042 return ret;
1043 }
1044
1045 static int ath10k_monitor_vdev_stop(struct ath10k *ar)
1046 {
1047 int ret = 0;
1048
1049 lockdep_assert_held(&ar->conf_mutex);
1050
1051 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
1052 if (ret)
1053 ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
1054 ar->monitor_vdev_id, ret);
1055
1056 reinit_completion(&ar->vdev_setup_done);
1057
1058 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1059 if (ret)
1060 ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n",
1061 ar->monitor_vdev_id, ret);
1062
1063 ret = ath10k_vdev_setup_sync(ar);
1064 if (ret)
1065 ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
1066 ar->monitor_vdev_id, ret);
1067
1068 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
1069 ar->monitor_vdev_id);
1070 return ret;
1071 }
1072
1073 static int ath10k_monitor_vdev_create(struct ath10k *ar)
1074 {
1075 int bit, ret = 0;
1076
1077 lockdep_assert_held(&ar->conf_mutex);
1078
1079 if (ar->free_vdev_map == 0) {
1080 ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
1081 return -ENOMEM;
1082 }
1083
1084 bit = __ffs64(ar->free_vdev_map);
1085
1086 ar->monitor_vdev_id = bit;
1087
1088 ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
1089 WMI_VDEV_TYPE_MONITOR,
1090 0, ar->mac_addr);
1091 if (ret) {
1092 ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
1093 ar->monitor_vdev_id, ret);
1094 return ret;
1095 }
1096
1097 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
1098 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
1099 ar->monitor_vdev_id);
1100
1101 return 0;
1102 }
1103
1104 static int ath10k_monitor_vdev_delete(struct ath10k *ar)
1105 {
1106 int ret = 0;
1107
1108 lockdep_assert_held(&ar->conf_mutex);
1109
1110 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
1111 if (ret) {
1112 ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
1113 ar->monitor_vdev_id, ret);
1114 return ret;
1115 }
1116
1117 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
1118
1119 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
1120 ar->monitor_vdev_id);
1121 return ret;
1122 }
1123
1124 static int ath10k_monitor_start(struct ath10k *ar)
1125 {
1126 int ret;
1127
1128 lockdep_assert_held(&ar->conf_mutex);
1129
1130 ret = ath10k_monitor_vdev_create(ar);
1131 if (ret) {
1132 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
1133 return ret;
1134 }
1135
1136 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
1137 if (ret) {
1138 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
1139 ath10k_monitor_vdev_delete(ar);
1140 return ret;
1141 }
1142
1143 ar->monitor_started = true;
1144 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
1145
1146 return 0;
1147 }
1148
1149 static int ath10k_monitor_stop(struct ath10k *ar)
1150 {
1151 int ret;
1152
1153 lockdep_assert_held(&ar->conf_mutex);
1154
1155 ret = ath10k_monitor_vdev_stop(ar);
1156 if (ret) {
1157 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
1158 return ret;
1159 }
1160
1161 ret = ath10k_monitor_vdev_delete(ar);
1162 if (ret) {
1163 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
1164 return ret;
1165 }
1166
1167 ar->monitor_started = false;
1168 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
1169
1170 return 0;
1171 }
1172
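/* A monitor vdev is wanted when monitor mode is requested (ar->monitor),
 * when FIF_OTHER_BSS filtering is configured on firmware without the
 * ALLOWS_MESH_BCAST feature, or while CAC is running; and only if at least
 * one channel context exists to derive a channel from and no dedicated
 * monitor interface (monitor_arvif) is already present.
 */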
1173 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
1174 {
1175 int num_ctx;
1176
1177 /* At least one chanctx is required to derive a channel to start
1178 * monitor vdev on.
1179 */
1180 num_ctx = ath10k_mac_num_chanctxs(ar);
1181 if (num_ctx == 0)
1182 return false;
1183
1184 /* If there's already an existing special monitor interface then don't
1185 * bother creating another monitor vdev.
1186 */
1187 if (ar->monitor_arvif)
1188 return false;
1189
1190 return ar->monitor ||
1191 (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST,
1192 ar->running_fw->fw_file.fw_features) &&
1193 (ar->filter_flags & FIF_OTHER_BSS)) ||
1194 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1195 }
1196
1197 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
1198 {
1199 int num_ctx;
1200
1201 num_ctx = ath10k_mac_num_chanctxs(ar);
1202
1203 /* FIXME: Current interface combinations and cfg80211/mac80211 code
1204 * shouldn't allow this but make sure to prevent handling the following
1205 * case anyway since multi-channel DFS hasn't been tested at all.
1206 */
1207 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
1208 return false;
1209
1210 return true;
1211 }
1212
1213 static int ath10k_monitor_recalc(struct ath10k *ar)
1214 {
1215 bool needed;
1216 bool allowed;
1217 int ret;
1218
1219 lockdep_assert_held(&ar->conf_mutex);
1220
1221 needed = ath10k_mac_monitor_vdev_is_needed(ar);
1222 allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
1223
1224 ath10k_dbg(ar, ATH10K_DBG_MAC,
1225 "mac monitor recalc started? %d needed? %d allowed? %d\n",
1226 ar->monitor_started, needed, allowed);
1227
1228 if (WARN_ON(needed && !allowed)) {
1229 if (ar->monitor_started) {
1230 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
1231
1232 ret = ath10k_monitor_stop(ar);
1233 if (ret)
1234 ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
1235 ret);
1236 /* not serious */
1237 }
1238
1239 return -EPERM;
1240 }
1241
1242 if (needed == ar->monitor_started)
1243 return 0;
1244
1245 if (needed)
1246 return ath10k_monitor_start(ar);
1247 else
1248 return ath10k_monitor_stop(ar);
1249 }
1250
1251 static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
1252 {
1253 struct ath10k *ar = arvif->ar;
1254
1255 lockdep_assert_held(&ar->conf_mutex);
1256
1257 if (!arvif->is_started) {
1258 ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
1259 return false;
1260 }
1261
1262 return true;
1263 }
1264
1265 static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
1266 {
1267 struct ath10k *ar = arvif->ar;
1268 u32 vdev_param;
1269
1270 lockdep_assert_held(&ar->conf_mutex);
1271
1272 vdev_param = ar->wmi.vdev_param->protection_mode;
1273
1274 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
1275 arvif->vdev_id, arvif->use_cts_prot);
1276
1277 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1278 arvif->use_cts_prot ? 1 : 0);
1279 }
1280
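/* Build the enable_rtscts vdev parameter: RTS/CTS is enabled unconditionally,
 * and the retry profile depends on the connected stations, using RTS/CTS
 * across software retries while legacy (non-HT) stations are present and only
 * for the second rate series otherwise.
 */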
1281 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
1282 {
1283 struct ath10k *ar = arvif->ar;
1284 u32 vdev_param, rts_cts = 0;
1285
1286 lockdep_assert_held(&ar->conf_mutex);
1287
1288 vdev_param = ar->wmi.vdev_param->enable_rtscts;
1289
1290 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
1291
1292 if (arvif->num_legacy_stations > 0)
1293 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
1294 WMI_RTSCTS_PROFILE);
1295 else
1296 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
1297 WMI_RTSCTS_PROFILE);
1298
1299 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
1300 arvif->vdev_id, rts_cts);
1301
1302 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1303 rts_cts);
1304 }
1305
1306 static int ath10k_start_cac(struct ath10k *ar)
1307 {
1308 int ret;
1309
1310 lockdep_assert_held(&ar->conf_mutex);
1311
1312 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1313
1314 ret = ath10k_monitor_recalc(ar);
1315 if (ret) {
1316 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
1317 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1318 return ret;
1319 }
1320
1321 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
1322 ar->monitor_vdev_id);
1323
1324 return 0;
1325 }
1326
1327 static int ath10k_stop_cac(struct ath10k *ar)
1328 {
1329 lockdep_assert_held(&ar->conf_mutex);
1330
1331 /* CAC is not running - do nothing */
1332 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
1333 return 0;
1334
1335 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1336 ath10k_monitor_stop(ar);
1337
1338 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
1339
1340 return 0;
1341 }
1342
1343 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
1344 struct ieee80211_chanctx_conf *conf,
1345 void *data)
1346 {
1347 bool *ret = data;
1348
1349 if (!*ret && conf->radar_enabled)
1350 *ret = true;
1351 }
1352
1353 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
1354 {
1355 bool has_radar = false;
1356
1357 ieee80211_iter_chan_contexts_atomic(ar->hw,
1358 ath10k_mac_has_radar_iter,
1359 &has_radar);
1360
1361 return has_radar;
1362 }
1363
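/* Re-evaluate whether a CAC monitor vdev should run: any previous CAC is
 * stopped first, and a new one is started only if some channel context has
 * radar detection enabled and no vdevs have been started yet.
 */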
1364 static void ath10k_recalc_radar_detection(struct ath10k *ar)
1365 {
1366 int ret;
1367
1368 lockdep_assert_held(&ar->conf_mutex);
1369
1370 ath10k_stop_cac(ar);
1371
1372 if (!ath10k_mac_has_radar_enabled(ar))
1373 return;
1374
1375 if (ar->num_started_vdevs > 0)
1376 return;
1377
1378 ret = ath10k_start_cac(ar);
1379 if (ret) {
1380 /*
1381 * It is not possible to start CAC on the current channel, so
1382 * transmitting is not allowed. Make this channel DFS_UNAVAILABLE
1383 * by indicating that radar was detected.
1384 */
1385 ath10k_warn(ar, "failed to start CAC: %d\n", ret);
1386 ieee80211_radar_detected(ar->hw);
1387 }
1388 }
1389
1390 static int ath10k_vdev_stop(struct ath10k_vif *arvif)
1391 {
1392 struct ath10k *ar = arvif->ar;
1393 int ret;
1394
1395 lockdep_assert_held(&ar->conf_mutex);
1396
1397 reinit_completion(&ar->vdev_setup_done);
1398
1399 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
1400 if (ret) {
1401 ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
1402 arvif->vdev_id, ret);
1403 return ret;
1404 }
1405
1406 ret = ath10k_vdev_setup_sync(ar);
1407 if (ret) {
1408 ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
1409 arvif->vdev_id, ret);
1410 return ret;
1411 }
1412
1413 WARN_ON(ar->num_started_vdevs == 0);
1414
1415 if (ar->num_started_vdevs != 0) {
1416 ar->num_started_vdevs--;
1417 ath10k_recalc_radar_detection(ar);
1418 }
1419
1420 return ret;
1421 }
1422
1423 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
1424 const struct cfg80211_chan_def *chandef,
1425 bool restart)
1426 {
1427 struct ath10k *ar = arvif->ar;
1428 struct wmi_vdev_start_request_arg arg = {};
1429 int ret = 0;
1430
1431 lockdep_assert_held(&ar->conf_mutex);
1432
1433 reinit_completion(&ar->vdev_setup_done);
1434
1435 arg.vdev_id = arvif->vdev_id;
1436 arg.dtim_period = arvif->dtim_period;
1437 arg.bcn_intval = arvif->beacon_interval;
1438
1439 arg.channel.freq = chandef->chan->center_freq;
1440 arg.channel.band_center_freq1 = chandef->center_freq1;
1441 arg.channel.band_center_freq2 = chandef->center_freq2;
1442 arg.channel.mode = chan_to_phymode(chandef);
1443
1444 arg.channel.min_power = 0;
1445 arg.channel.max_power = chandef->chan->max_power * 2;
1446 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
1447 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
1448
1449 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
1450 arg.ssid = arvif->u.ap.ssid;
1451 arg.ssid_len = arvif->u.ap.ssid_len;
1452 arg.hidden_ssid = arvif->u.ap.hidden_ssid;
1453
1454 /* For now allow DFS for AP mode */
1455 arg.channel.chan_radar =
1456 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
1457 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
1458 arg.ssid = arvif->vif->bss_conf.ssid;
1459 arg.ssid_len = arvif->vif->bss_conf.ssid_len;
1460 }
1461
1462 ath10k_dbg(ar, ATH10K_DBG_MAC,
1463 "mac vdev %d start center_freq %d phymode %s\n",
1464 arg.vdev_id, arg.channel.freq,
1465 ath10k_wmi_phymode_str(arg.channel.mode));
1466
1467 if (restart)
1468 ret = ath10k_wmi_vdev_restart(ar, &arg);
1469 else
1470 ret = ath10k_wmi_vdev_start(ar, &arg);
1471
1472 if (ret) {
1473 ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
1474 arg.vdev_id, ret);
1475 return ret;
1476 }
1477
1478 ret = ath10k_vdev_setup_sync(ar);
1479 if (ret) {
1480 ath10k_warn(ar,
1481 "failed to synchronize setup for vdev %i restart %d: %d\n",
1482 arg.vdev_id, restart, ret);
1483 return ret;
1484 }
1485
1486 ar->num_started_vdevs++;
1487 ath10k_recalc_radar_detection(ar);
1488
1489 return ret;
1490 }
1491
1492 static int ath10k_vdev_start(struct ath10k_vif *arvif,
1493 const struct cfg80211_chan_def *def)
1494 {
1495 return ath10k_vdev_start_restart(arvif, def, false);
1496 }
1497
1498 static int ath10k_vdev_restart(struct ath10k_vif *arvif,
1499 const struct cfg80211_chan_def *def)
1500 {
1501 return ath10k_vdev_start_restart(arvif, def, true);
1502 }
1503
1504 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
1505 struct sk_buff *bcn)
1506 {
1507 struct ath10k *ar = arvif->ar;
1508 struct ieee80211_mgmt *mgmt;
1509 const u8 *p2p_ie;
1510 int ret;
1511
1512 if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
1513 return 0;
1514
1515 mgmt = (void *)bcn->data;
1516 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1517 mgmt->u.beacon.variable,
1518 bcn->len - (mgmt->u.beacon.variable -
1519 bcn->data));
1520 if (!p2p_ie)
1521 return -ENOENT;
1522
1523 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
1524 if (ret) {
1525 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
1526 arvif->vdev_id, ret);
1527 return ret;
1528 }
1529
1530 return 0;
1531 }
1532
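/* Strip a single vendor IE from an skb in place: locate it with
 * cfg80211_find_vendor_ie(), memmove() the remaining tail over it and trim
 * the skb. len = ie[1] + 2 covers the element's 2-byte header plus payload.
 */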
1533 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
1534 u8 oui_type, size_t ie_offset)
1535 {
1536 size_t len;
1537 const u8 *next;
1538 const u8 *end;
1539 u8 *ie;
1540
1541 if (WARN_ON(skb->len < ie_offset))
1542 return -EINVAL;
1543
1544 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
1545 skb->data + ie_offset,
1546 skb->len - ie_offset);
1547 if (!ie)
1548 return -ENOENT;
1549
1550 len = ie[1] + 2;
1551 end = skb->data + skb->len;
1552 next = ie + len;
1553
1554 if (WARN_ON(next > end))
1555 return -EINVAL;
1556
1557 memmove(ie, next, end - next);
1558 skb_trim(skb, skb->len - len);
1559
1560 return 0;
1561 }
1562
1563 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
1564 {
1565 struct ath10k *ar = arvif->ar;
1566 struct ieee80211_hw *hw = ar->hw;
1567 struct ieee80211_vif *vif = arvif->vif;
1568 struct ieee80211_mutable_offsets offs = {};
1569 struct sk_buff *bcn;
1570 int ret;
1571
1572 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1573 return 0;
1574
1575 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
1576 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1577 return 0;
1578
1579 bcn = ieee80211_beacon_get_template(hw, vif, &offs);
1580 if (!bcn) {
1581 ath10k_warn(ar, "failed to get beacon template from mac80211\n");
1582 return -EPERM;
1583 }
1584
1585 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
1586 if (ret) {
1587 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
1588 kfree_skb(bcn);
1589 return ret;
1590 }
1591
1592 /* P2P IE is inserted by firmware automatically (as configured above)
1593 * so remove it from the base beacon template to avoid duplicate P2P
1594 * IEs in beacon frames.
1595 */
1596 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1597 offsetof(struct ieee80211_mgmt,
1598 u.beacon.variable));
1599
1600 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
1601 0, NULL, 0);
1602 kfree_skb(bcn);
1603
1604 if (ret) {
1605 ath10k_warn(ar, "failed to submit beacon template command: %d\n",
1606 ret);
1607 return ret;
1608 }
1609
1610 return 0;
1611 }
1612
1613 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
1614 {
1615 struct ath10k *ar = arvif->ar;
1616 struct ieee80211_hw *hw = ar->hw;
1617 struct ieee80211_vif *vif = arvif->vif;
1618 struct sk_buff *prb;
1619 int ret;
1620
1621 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1622 return 0;
1623
1624 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1625 return 0;
1626
1627 prb = ieee80211_proberesp_get(hw, vif);
1628 if (!prb) {
1629 ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
1630 return -EPERM;
1631 }
1632
1633 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
1634 kfree_skb(prb);
1635
1636 if (ret) {
1637 ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
1638 ret);
1639 return ret;
1640 }
1641
1642 return 0;
1643 }
1644
1645 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
1646 {
1647 struct ath10k *ar = arvif->ar;
1648 struct cfg80211_chan_def def;
1649 int ret;
1650
1651 /* When the vdev is originally started during assign_vif_chanctx() some
1652 * information is missing, notably the SSID. Firmware revisions with beacon
1653 * offloading require the SSID to be provided during vdev (re)start to
1654 * handle hidden SSID properly.
1655 *
1656 * Vdev restart must be done after vdev has been both started and
1657 * upped. Otherwise some firmware revisions (at least 10.2) fail to
1658 * deliver vdev restart response event causing timeouts during vdev
1659 * syncing in ath10k.
1660 *
1661 * Note: The vdev down/up and template reinstallation could be skipped
1662 * since only wmi-tlv firmware is known to have beacon offload and
1663 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
1664 * response delivery. It's probably more robust to keep it as is.
1665 */
1666 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1667 return 0;
1668
1669 if (WARN_ON(!arvif->is_started))
1670 return -EINVAL;
1671
1672 if (WARN_ON(!arvif->is_up))
1673 return -EINVAL;
1674
1675 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
1676 return -EINVAL;
1677
1678 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1679 if (ret) {
1680 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
1681 arvif->vdev_id, ret);
1682 return ret;
1683 }
1684
1685 /* Vdev down resets the beacon & presp templates. Reinstall them. Otherwise
1686 * the firmware will crash upon vdev up.
1687 */
1688
1689 ret = ath10k_mac_setup_bcn_tmpl(arvif);
1690 if (ret) {
1691 ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
1692 return ret;
1693 }
1694
1695 ret = ath10k_mac_setup_prb_tmpl(arvif);
1696 if (ret) {
1697 ath10k_warn(ar, "failed to update presp template: %d\n", ret);
1698 return ret;
1699 }
1700
1701 ret = ath10k_vdev_restart(arvif, &def);
1702 if (ret) {
1703 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
1704 arvif->vdev_id, ret);
1705 return ret;
1706 }
1707
1708 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1709 arvif->bssid);
1710 if (ret) {
1711 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
1712 arvif->vdev_id, ret);
1713 return ret;
1714 }
1715
1716 return 0;
1717 }
1718
1719 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
1720 struct ieee80211_bss_conf *info)
1721 {
1722 struct ath10k *ar = arvif->ar;
1723 int ret = 0;
1724
1725 lockdep_assert_held(&arvif->ar->conf_mutex);
1726
1727 if (!info->enable_beacon) {
1728 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1729 if (ret)
1730 ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
1731 arvif->vdev_id, ret);
1732
1733 arvif->is_up = false;
1734
1735 spin_lock_bh(&arvif->ar->data_lock);
1736 ath10k_mac_vif_beacon_free(arvif);
1737 spin_unlock_bh(&arvif->ar->data_lock);
1738
1739 return;
1740 }
1741
1742 arvif->tx_seq_no = 0x1000;
1743
1744 arvif->aid = 0;
1745 ether_addr_copy(arvif->bssid, info->bssid);
1746
1747 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1748 arvif->bssid);
1749 if (ret) {
1750 ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
1751 arvif->vdev_id, ret);
1752 return;
1753 }
1754
1755 arvif->is_up = true;
1756
1757 ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
1758 if (ret) {
1759 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
1760 arvif->vdev_id, ret);
1761 return;
1762 }
1763
1764 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
1765 }
1766
1767 static void ath10k_control_ibss(struct ath10k_vif *arvif,
1768 struct ieee80211_bss_conf *info,
1769 const u8 self_peer[ETH_ALEN])
1770 {
1771 struct ath10k *ar = arvif->ar;
1772 u32 vdev_param;
1773 int ret = 0;
1774
1775 lockdep_assert_held(&arvif->ar->conf_mutex);
1776
1777 if (!info->ibss_joined) {
1778 if (is_zero_ether_addr(arvif->bssid))
1779 return;
1780
1781 eth_zero_addr(arvif->bssid);
1782
1783 return;
1784 }
1785
1786 vdev_param = arvif->ar->wmi.vdev_param->atim_window;
1787 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
1788 ATH10K_DEFAULT_ATIM);
1789 if (ret)
1790 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
1791 arvif->vdev_id, ret);
1792 }
1793
1794 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
1795 {
1796 struct ath10k *ar = arvif->ar;
1797 u32 param;
1798 u32 value;
1799 int ret;
1800
1801 lockdep_assert_held(&arvif->ar->conf_mutex);
1802
1803 if (arvif->u.sta.uapsd)
1804 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
1805 else
1806 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1807
1808 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1809 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
1810 if (ret) {
1811 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
1812 value, arvif->vdev_id, ret);
1813 return ret;
1814 }
1815
1816 return 0;
1817 }
1818
1819 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
1820 {
1821 struct ath10k *ar = arvif->ar;
1822 u32 param;
1823 u32 value;
1824 int ret;
1825
1826 lockdep_assert_held(&arvif->ar->conf_mutex);
1827
1828 if (arvif->u.sta.uapsd)
1829 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
1830 else
1831 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1832
1833 param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1834 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1835 param, value);
1836 if (ret) {
1837 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
1838 value, arvif->vdev_id, ret);
1839 return ret;
1840 }
1841
1842 return 0;
1843 }
1844
1845 static int ath10k_mac_num_vifs_started(struct ath10k *ar)
1846 {
1847 struct ath10k_vif *arvif;
1848 int num = 0;
1849
1850 lockdep_assert_held(&ar->conf_mutex);
1851
1852 list_for_each_entry(arvif, &ar->arvifs, list)
1853 if (arvif->is_started)
1854 num++;
1855
1856 return num;
1857 }
1858
1859 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
1860 {
1861 struct ath10k *ar = arvif->ar;
1862 struct ieee80211_vif *vif = arvif->vif;
1863 struct ieee80211_conf *conf = &ar->hw->conf;
1864 enum wmi_sta_powersave_param param;
1865 enum wmi_sta_ps_mode psmode;
1866 int ret;
1867 int ps_timeout;
1868 bool enable_ps;
1869
1870 lockdep_assert_held(&arvif->ar->conf_mutex);
1871
1872 if (arvif->vif->type != NL80211_IFTYPE_STATION)
1873 return 0;
1874
1875 enable_ps = arvif->ps;
1876
1877 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
1878 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
1879 ar->running_fw->fw_file.fw_features)) {
1880 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
1881 arvif->vdev_id);
1882 enable_ps = false;
1883 }
1884
1885 if (!arvif->is_started) {
1886 /* mac80211 can update vif powersave state while disconnected.
1887 * Firmware doesn't behave nicely and consumes more power than
1888 * necessary if PS is disabled on a non-started vdev. Hence
1889 * force-enable PS for non-running vdevs.
1890 */
1891 psmode = WMI_STA_PS_MODE_ENABLED;
1892 } else if (enable_ps) {
1893 psmode = WMI_STA_PS_MODE_ENABLED;
1894 param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
1895
1896 ps_timeout = conf->dynamic_ps_timeout;
1897 if (ps_timeout == 0) {
1898 /* Firmware doesn't like 0 */
1899 ps_timeout = ieee80211_tu_to_usec(
1900 vif->bss_conf.beacon_int) / 1000;
1901 }
1902
1903 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
1904 ps_timeout);
1905 if (ret) {
1906 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
1907 arvif->vdev_id, ret);
1908 return ret;
1909 }
1910 } else {
1911 psmode = WMI_STA_PS_MODE_DISABLED;
1912 }
1913
1914 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
1915 arvif->vdev_id, psmode ? "enable" : "disable");
1916
1917 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
1918 if (ret) {
1919 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
1920 psmode, arvif->vdev_id, ret);
1921 return ret;
1922 }
1923
1924 return 0;
1925 }
1926
1927 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
1928 {
1929 struct ath10k *ar = arvif->ar;
1930 struct wmi_sta_keepalive_arg arg = {};
1931 int ret;
1932
1933 lockdep_assert_held(&arvif->ar->conf_mutex);
1934
1935 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
1936 return 0;
1937
1938 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
1939 return 0;
1940
1941 /* Some firmware revisions have a bug and ignore the `enabled` field.
1942 * Instead use the interval to disable the keepalive.
1943 */
1944 arg.vdev_id = arvif->vdev_id;
1945 arg.enabled = 1;
1946 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
1947 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
1948
1949 ret = ath10k_wmi_sta_keepalive(ar, &arg);
1950 if (ret) {
1951 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
1952 arvif->vdev_id, ret);
1953 return ret;
1954 }
1955
1956 return 0;
1957 }
1958
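/* Advance an in-progress AP channel switch: while the CSA countdown has not
 * finished, update the counter and re-install the beacon/probe response
 * templates so the advertised count stays current; once complete, notify
 * mac80211 via ieee80211_csa_finish().
 */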
1959 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
1960 {
1961 struct ath10k *ar = arvif->ar;
1962 struct ieee80211_vif *vif = arvif->vif;
1963 int ret;
1964
1965 lockdep_assert_held(&arvif->ar->conf_mutex);
1966
1967 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
1968 return;
1969
1970 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1971 return;
1972
1973 if (!vif->csa_active)
1974 return;
1975
1976 if (!arvif->is_up)
1977 return;
1978
1979 if (!ieee80211_csa_is_complete(vif)) {
1980 ieee80211_csa_update_counter(vif);
1981
1982 ret = ath10k_mac_setup_bcn_tmpl(arvif);
1983 if (ret)
1984 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
1985 ret);
1986
1987 ret = ath10k_mac_setup_prb_tmpl(arvif);
1988 if (ret)
1989 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
1990 ret);
1991 } else {
1992 ieee80211_csa_finish(vif);
1993 }
1994 }
1995
1996 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
1997 {
1998 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1999 ap_csa_work);
2000 struct ath10k *ar = arvif->ar;
2001
2002 mutex_lock(&ar->conf_mutex);
2003 ath10k_mac_vif_ap_csa_count_down(arvif);
2004 mutex_unlock(&ar->conf_mutex);
2005 }
2006
2007 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
2008 struct ieee80211_vif *vif)
2009 {
2010 struct sk_buff *skb = data;
2011 struct ieee80211_mgmt *mgmt = (void *)skb->data;
2012 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2013
2014 if (vif->type != NL80211_IFTYPE_STATION)
2015 return;
2016
2017 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
2018 return;
2019
2020 cancel_delayed_work(&arvif->connection_loss_work);
2021 }
2022
2023 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
2024 {
2025 ieee80211_iterate_active_interfaces_atomic(ar->hw,
2026 IEEE80211_IFACE_ITER_NORMAL,
2027 ath10k_mac_handle_beacon_iter,
2028 skb);
2029 }
2030
2031 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
2032 struct ieee80211_vif *vif)
2033 {
2034 u32 *vdev_id = data;
2035 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2036 struct ath10k *ar = arvif->ar;
2037 struct ieee80211_hw *hw = ar->hw;
2038
2039 if (arvif->vdev_id != *vdev_id)
2040 return;
2041
2042 if (!arvif->is_up)
2043 return;
2044
2045 ieee80211_beacon_loss(vif);
2046
2047 /* Firmware doesn't report beacon loss events repeatedly. If AP probe
2048 * (done by mac80211) succeeds but beacons do not resume then it
2049 * doesn't make sense to continue operation. Queue connection loss work
2050 * which can be cancelled when beacon is received.
2051 */
2052 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
2053 ATH10K_CONNECTION_LOSS_HZ);
2054 }
2055
2056 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
2057 {
2058 ieee80211_iterate_active_interfaces_atomic(ar->hw,
2059 IEEE80211_IFACE_ITER_NORMAL,
2060 ath10k_mac_handle_beacon_miss_iter,
2061 &vdev_id);
2062 }
2063
2064 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
2065 {
2066 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
2067 connection_loss_work.work);
2068 struct ieee80211_vif *vif = arvif->vif;
2069
2070 if (!arvif->is_up)
2071 return;
2072
2073 ieee80211_connection_loss(vif);
2074 }
2075
2076 /**********************/
2077 /* Station management */
2078 /**********************/
2079
2080 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
2081 struct ieee80211_vif *vif)
2082 {
2083 /* Some firmware revisions have unstable STA powersave when listen
2084 * interval is set too high (e.g. 5). The symptoms are firmware doesn't
2085 * generate NullFunc frames properly even if buffered frames have been
2086 * indicated in Beacon TIM. Firmware would seldom wake up to pull
2087 * buffered frames. Often pinging the device from AP would simply fail.
2088 *
2089 * As a workaround set it to 1.
2090 */
2091 if (vif->type == NL80211_IFTYPE_STATION)
2092 return 1;
2093
2094 return ar->hw->conf.listen_interval;
2095 }
2096
2097 static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
2098 struct ieee80211_vif *vif,
2099 struct ieee80211_sta *sta,
2100 struct wmi_peer_assoc_complete_arg *arg)
2101 {
2102 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2103 u32 aid;
2104
2105 lockdep_assert_held(&ar->conf_mutex);
2106
2107 if (vif->type == NL80211_IFTYPE_STATION)
2108 aid = vif->bss_conf.aid;
2109 else
2110 aid = sta->aid;
2111
2112 ether_addr_copy(arg->addr, sta->addr);
2113 arg->vdev_id = arvif->vdev_id;
2114 arg->peer_aid = aid;
2115 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
2116 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
2117 arg->peer_num_spatial_streams = 1;
2118 arg->peer_caps = vif->bss_conf.assoc_capability;
2119 }
2120
2121 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
2122 struct ieee80211_vif *vif,
2123 struct ieee80211_sta *sta,
2124 struct wmi_peer_assoc_complete_arg *arg)
2125 {
2126 struct ieee80211_bss_conf *info = &vif->bss_conf;
2127 struct cfg80211_chan_def def;
2128 struct cfg80211_bss *bss;
2129 const u8 *rsnie = NULL;
2130 const u8 *wpaie = NULL;
2131
2132 lockdep_assert_held(&ar->conf_mutex);
2133
2134 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2135 return;
2136
2137 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
2138 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
2139 if (bss) {
2140 const struct cfg80211_bss_ies *ies;
2141
2142 rcu_read_lock();
2143 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
2144
2145 ies = rcu_dereference(bss->ies);
2146
2147 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
2148 WLAN_OUI_TYPE_MICROSOFT_WPA,
2149 ies->data,
2150 ies->len);
2151 rcu_read_unlock();
2152 cfg80211_put_bss(ar->hw->wiphy, bss);
2153 }
2154
2155 /* FIXME: is basing this on the RSN/WPA IEs the correct approach? */
2156 if (rsnie || wpaie) {
2157 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
2158 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
2159 }
2160
2161 if (wpaie) {
2162 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
2163 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
2164 }
2165
2166 if (sta->mfp &&
2167 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
2168 ar->running_fw->fw_file.fw_features)) {
2169 arg->peer_flags |= ar->wmi.peer_flags->pmf;
2170 }
2171 }
2172
2173 static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
2174 struct ieee80211_vif *vif,
2175 struct ieee80211_sta *sta,
2176 struct wmi_peer_assoc_complete_arg *arg)
2177 {
2178 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2179 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
2180 struct cfg80211_chan_def def;
2181 const struct ieee80211_supported_band *sband;
2182 const struct ieee80211_rate *rates;
2183 enum nl80211_band band;
2184 u32 ratemask;
2185 u8 rate;
2186 int i;
2187
2188 lockdep_assert_held(&ar->conf_mutex);
2189
2190 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2191 return;
2192
2193 band = def.chan->band;
2194 sband = ar->hw->wiphy->bands[band];
2195 ratemask = sta->supp_rates[band];
2196 ratemask &= arvif->bitrate_mask.control[band].legacy;
2197 rates = sband->bitrates;
2198
2199 rateset->num_rates = 0;
2200
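/* Each set bit in the mask-filtered legacy rate bitmap selects the matching
 * entry in the band's bitrate table, which is then translated into the rate
 * code carried in the WMI peer assoc command.
 */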
2201 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
2202 if (!(ratemask & 1))
2203 continue;
2204
2205 rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
2206 rateset->rates[rateset->num_rates] = rate;
2207 rateset->num_rates++;
2208 }
2209 }
2210
2211 static bool
2212 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
2213 {
2214 int nss;
2215
2216 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
2217 if (ht_mcs_mask[nss])
2218 return false;
2219
2220 return true;
2221 }
2222
2223 static bool
2224 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
2225 {
2226 int nss;
2227
2228 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
2229 if (vht_mcs_mask[nss])
2230 return false;
2231
2232 return true;
2233 }
2234
2235 static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
2236 struct ieee80211_vif *vif,
2237 struct ieee80211_sta *sta,
2238 struct wmi_peer_assoc_complete_arg *arg)
2239 {
2240 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2241 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2242 struct cfg80211_chan_def def;
2243 enum nl80211_band band;
2244 const u8 *ht_mcs_mask;
2245 const u16 *vht_mcs_mask;
2246 int i, n;
2247 u8 max_nss;
2248 u32 stbc;
2249
2250 lockdep_assert_held(&ar->conf_mutex);
2251
2252 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2253 return;
2254
2255 if (!ht_cap->ht_supported)
2256 return;
2257
2258 band = def.chan->band;
2259 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2260 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2261
2262 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
2263 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2264 return;
2265
2266 arg->peer_flags |= ar->wmi.peer_flags->ht;
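/* The maximum A-MPDU length is 2^(13 + ampdu_factor) - 1 octets as defined
 * by the HT capabilities (IEEE80211_HT_MAX_AMPDU_FACTOR is 13).
 */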
2267 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2268 ht_cap->ampdu_factor)) - 1;
2269
2270 arg->peer_mpdu_density =
2271 ath10k_parse_mpdudensity(ht_cap->ampdu_density);
2272
2273 arg->peer_ht_caps = ht_cap->cap;
2274 arg->peer_rate_caps |= WMI_RC_HT_FLAG;
2275
2276 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
2277 arg->peer_flags |= ar->wmi.peer_flags->ldbc;
2278
2279 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
2280 arg->peer_flags |= ar->wmi.peer_flags->bw40;
2281 arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
2282 }
2283
2284 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
2285 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
2286 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2287
2288 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
2289 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2290 }
2291
2292 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
2293 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
2294 arg->peer_flags |= ar->wmi.peer_flags->stbc;
2295 }
2296
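/* The RX STBC subfield is a 2-bit count of spatial streams for which STBC
 * reception is supported; shift it into place within the WMI rate caps.
 */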
2297 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
2298 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
2299 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
2300 stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
2301 arg->peer_rate_caps |= stbc;
2302 arg->peer_flags |= ar->wmi.peer_flags->stbc;
2303 }
2304
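/* rx_mask[1] covers MCS 8-15 (two streams) and rx_mask[2] covers MCS 16-23
 * (three streams), hence the dual/triple stream rate capability flags.
 */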
2305 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
2306 arg->peer_rate_caps |= WMI_RC_TS_FLAG;
2307 else if (ht_cap->mcs.rx_mask[1])
2308 arg->peer_rate_caps |= WMI_RC_DS_FLAG;
2309
2310 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
2311 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
2312 (ht_mcs_mask[i / 8] & BIT(i % 8))) {
2313 max_nss = (i / 8) + 1;
2314 arg->peer_ht_rates.rates[n++] = i;
2315 }
2316
2317 /*
2318 * This is a workaround for HT-enabled STAs which break the spec
2319 * and have no HT capabilities RX mask (no HT RX MCS map).
2320 *
2321 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
2322 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
2323 *
2324 * Firmware asserts if such situation occurs.
2325 */
2326 if (n == 0) {
2327 arg->peer_ht_rates.num_rates = 8;
2328 for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
2329 arg->peer_ht_rates.rates[i] = i;
2330 } else {
2331 arg->peer_ht_rates.num_rates = n;
2332 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
2333 }
2334
2335 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
2336 arg->addr,
2337 arg->peer_ht_rates.num_rates,
2338 arg->peer_num_spatial_streams);
2339 }
2340
2341 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
2342 struct ath10k_vif *arvif,
2343 struct ieee80211_sta *sta)
2344 {
2345 u32 uapsd = 0;
2346 u32 max_sp = 0;
2347 int ret = 0;
2348
2349 lockdep_assert_held(&ar->conf_mutex);
2350
2351 if (sta->wme && sta->uapsd_queues) {
2352 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
2353 sta->uapsd_queues, sta->max_sp);
2354
2355 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2356 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
2357 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
2358 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2359 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
2360 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
2361 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2362 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
2363 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
2364 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2365 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
2366 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
2367
2368 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
2369 max_sp = sta->max_sp;
2370
2371 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2372 sta->addr,
2373 WMI_AP_PS_PEER_PARAM_UAPSD,
2374 uapsd);
2375 if (ret) {
2376 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
2377 arvif->vdev_id, ret);
2378 return ret;
2379 }
2380
2381 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2382 sta->addr,
2383 WMI_AP_PS_PEER_PARAM_MAX_SP,
2384 max_sp);
2385 if (ret) {
2386 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
2387 arvif->vdev_id, ret);
2388 return ret;
2389 }
2390
2391 /* TODO setup this based on STA listen interval and
2392 * beacon interval. Currently we don't know
2393 * sta->listen_interval - mac80211 patch required.
2394 * Currently use 10 seconds
2395 */
2396 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
2397 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
2398 10);
2399 if (ret) {
2400 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
2401 arvif->vdev_id, ret);
2402 return ret;
2403 }
2404 }
2405
2406 return 0;
2407 }
2408
2409 static u16
2410 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
2411 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
2412 {
2413 int idx_limit;
2414 int nss;
2415 u16 mcs_map;
2416 u16 mcs;
2417
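/* The VHT MCS map encodes two bits per NSS: 0 = MCS 0-7, 1 = MCS 0-8,
 * 2 = MCS 0-9, 3 = not supported. Clamp each NSS to the configured limit.
 */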
2418 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
2419 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
2420 vht_mcs_limit[nss];
2421
2422 if (mcs_map)
2423 idx_limit = fls(mcs_map) - 1;
2424 else
2425 idx_limit = -1;
2426
2427 switch (idx_limit) {
2428 case 0: /* fall through */
2429 case 1: /* fall through */
2430 case 2: /* fall through */
2431 case 3: /* fall through */
2432 case 4: /* fall through */
2433 case 5: /* fall through */
2434 case 6: /* fall through */
2435 default:
2436 /* see ath10k_mac_can_set_bitrate_mask() */
2437 WARN_ON(1);
2438 /* fall through */
2439 case -1:
2440 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
2441 break;
2442 case 7:
2443 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
2444 break;
2445 case 8:
2446 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
2447 break;
2448 case 9:
2449 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
2450 break;
2451 }
2452
2453 tx_mcs_set &= ~(0x3 << (nss * 2));
2454 tx_mcs_set |= mcs << (nss * 2);
2455 }
2456
2457 return tx_mcs_set;
2458 }
2459
2460 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
2461 struct ieee80211_vif *vif,
2462 struct ieee80211_sta *sta,
2463 struct wmi_peer_assoc_complete_arg *arg)
2464 {
2465 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
2466 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2467 struct cfg80211_chan_def def;
2468 enum nl80211_band band;
2469 const u16 *vht_mcs_mask;
2470 u8 ampdu_factor;
2471 u8 max_nss, vht_mcs;
2472 int i;
2473
2474 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2475 return;
2476
2477 if (!vht_cap->vht_supported)
2478 return;
2479
2480 band = def.chan->band;
2481 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2482
2483 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2484 return;
2485
2486 arg->peer_flags |= ar->wmi.peer_flags->vht;
2487
2488 if (def.chan->band == NL80211_BAND_2GHZ)
2489 arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
2490
2491 arg->peer_vht_caps = vht_cap->cap;
2492
2493 ampdu_factor = (vht_cap->cap &
2494 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
2495 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
2496
2497 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
2498 * zero in VHT IE. Using it would result in degraded throughput.
2499 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
2500 * it if VHT max_mpdu is smaller.
2501 */
2502 arg->peer_max_mpdu = max(arg->peer_max_mpdu,
2503 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2504 ampdu_factor)) - 1);
2505
2506 if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2507 arg->peer_flags |= ar->wmi.peer_flags->bw80;
2508
2509 if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
2510 arg->peer_flags |= ar->wmi.peer_flags->bw160;
2511
2512 /* Calculate peer NSS capability from VHT capabilities if STA
2513 * supports VHT.
2514 */
2515 for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
2516 vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
2517 (2 * i) & 3;
2518
2519 if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) &&
2520 vht_mcs_mask[i])
2521 max_nss = i + 1;
2522 }
2523 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
2524 arg->peer_vht_rates.rx_max_rate =
2525 __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
2526 arg->peer_vht_rates.rx_mcs_set =
2527 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
2528 arg->peer_vht_rates.tx_max_rate =
2529 __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
2530 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
2531 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
2532
2533 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
2534 sta->addr, arg->peer_max_mpdu, arg->peer_flags);
2535
2536 if (arg->peer_vht_rates.rx_max_rate &&
2537 (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) {
2538 switch (arg->peer_vht_rates.rx_max_rate) {
2539 case 1560:
2540 /* 2x2 at 160 MHz is all it can do. */
2541 arg->peer_bw_rxnss_override = 2;
2542 break;
2543 case 780:
2544 /* Can only do 1x1 at 160 MHz (long guard interval) */
2545 arg->peer_bw_rxnss_override = 1;
2546 break;
2547 }
2548 }
2549 }
2550
2551 static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
2552 struct ieee80211_vif *vif,
2553 struct ieee80211_sta *sta,
2554 struct wmi_peer_assoc_complete_arg *arg)
2555 {
2556 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2557
2558 switch (arvif->vdev_type) {
2559 case WMI_VDEV_TYPE_AP:
2560 if (sta->wme)
2561 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2562
2563 if (sta->wme && sta->uapsd_queues) {
2564 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
2565 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
2566 }
2567 break;
2568 case WMI_VDEV_TYPE_STA:
2569 if (sta->wme)
2570 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2571 break;
2572 case WMI_VDEV_TYPE_IBSS:
2573 if (sta->wme)
2574 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2575 break;
2576 default:
2577 break;
2578 }
2579
2580 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
2581 sta->addr, !!(arg->peer_flags &
2582 arvif->ar->wmi.peer_flags->qos));
2583 }
2584
2585 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
2586 {
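/* Rates below ATH10K_MAC_FIRST_OFDM_RATE_IDX are CCK, so a non-zero result
 * after the shift means the station supports at least one OFDM rate.
 */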
2587 return sta->supp_rates[NL80211_BAND_2GHZ] >>
2588 ATH10K_MAC_FIRST_OFDM_RATE_IDX;
2589 }
2590
2591 static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
2592 struct ieee80211_sta *sta)
2593 {
2594 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
2595 switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
2596 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
2597 return MODE_11AC_VHT160;
2598 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
2599 return MODE_11AC_VHT80_80;
2600 default:
2601 /* not sure if this is a valid case? */
2602 return MODE_11AC_VHT160;
2603 }
2604 }
2605
2606 if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2607 return MODE_11AC_VHT80;
2608
2609 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2610 return MODE_11AC_VHT40;
2611
2612 if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
2613 return MODE_11AC_VHT20;
2614
2615 return MODE_UNKNOWN;
2616 }
2617
2618 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2619 struct ieee80211_vif *vif,
2620 struct ieee80211_sta *sta,
2621 struct wmi_peer_assoc_complete_arg *arg)
2622 {
2623 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2624 struct cfg80211_chan_def def;
2625 enum nl80211_band band;
2626 const u8 *ht_mcs_mask;
2627 const u16 *vht_mcs_mask;
2628 enum wmi_phy_mode phymode = MODE_UNKNOWN;
2629
2630 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2631 return;
2632
2633 band = def.chan->band;
2634 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2635 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2636
2637 switch (band) {
2638 case NL80211_BAND_2GHZ:
2639 if (sta->vht_cap.vht_supported &&
2640 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2641 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2642 phymode = MODE_11AC_VHT40;
2643 else
2644 phymode = MODE_11AC_VHT20;
2645 } else if (sta->ht_cap.ht_supported &&
2646 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2647 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2648 phymode = MODE_11NG_HT40;
2649 else
2650 phymode = MODE_11NG_HT20;
2651 } else if (ath10k_mac_sta_has_ofdm_only(sta)) {
2652 phymode = MODE_11G;
2653 } else {
2654 phymode = MODE_11B;
2655 }
2656
2657 break;
2658 case NL80211_BAND_5GHZ:
2659 /*
2660 * Check VHT first.
2661 */
2662 if (sta->vht_cap.vht_supported &&
2663 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2664 phymode = ath10k_mac_get_phymode_vht(ar, sta);
2665 } else if (sta->ht_cap.ht_supported &&
2666 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2667 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
2668 phymode = MODE_11NA_HT40;
2669 else
2670 phymode = MODE_11NA_HT20;
2671 } else {
2672 phymode = MODE_11A;
2673 }
2674
2675 break;
2676 default:
2677 break;
2678 }
2679
2680 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
2681 sta->addr, ath10k_wmi_phymode_str(phymode));
2682
2683 arg->peer_phymode = phymode;
2684 WARN_ON(phymode == MODE_UNKNOWN);
2685 }
2686
2687 static int ath10k_peer_assoc_prepare(struct ath10k *ar,
2688 struct ieee80211_vif *vif,
2689 struct ieee80211_sta *sta,
2690 struct wmi_peer_assoc_complete_arg *arg)
2691 {
2692 lockdep_assert_held(&ar->conf_mutex);
2693
2694 memset(arg, 0, sizeof(*arg));
2695
2696 ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
2697 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
2698 ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
2699 ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
2700 ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
2701 ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
2702 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
2703
2704 return 0;
2705 }
2706
2707 static const u32 ath10k_smps_map[] = {
2708 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
2709 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
2710 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
2711 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
2712 };
2713
2714 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
2715 const u8 *addr,
2716 const struct ieee80211_sta_ht_cap *ht_cap)
2717 {
2718 int smps;
2719
2720 if (!ht_cap->ht_supported)
2721 return 0;
2722
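/* SM power save is a two-bit field in the HT capability info; map it onto
 * the WMI SMPS state understood by the firmware.
 */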
2723 smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
2724 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
2725
2726 if (smps >= ARRAY_SIZE(ath10k_smps_map))
2727 return -EINVAL;
2728
2729 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
2730 WMI_PEER_SMPS_STATE,
2731 ath10k_smps_map[smps]);
2732 }
2733
2734 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
2735 struct ieee80211_vif *vif,
2736 struct ieee80211_sta_vht_cap vht_cap)
2737 {
2738 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2739 int ret;
2740 u32 param;
2741 u32 value;
2742
2743 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
2744 return 0;
2745
2746 if (!(ar->vht_cap_info &
2747 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2748 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
2749 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2750 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
2751 return 0;
2752
2753 param = ar->wmi.vdev_param->txbf;
2754 value = 0;
2755
2756 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
2757 return 0;
2758
2759 /* The following logic is correct: if a remote STA advertises support
2760 * for being a beamformer then we should enable beamformee operation.
2761 */
2762
2763 if (ar->vht_cap_info &
2764 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2765 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
2766 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
2767 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2768
2769 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
2770 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
2771 }
2772
2773 if (ar->vht_cap_info &
2774 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2775 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
2776 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
2777 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2778
2779 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
2780 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
2781 }
2782
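/* MU beamforming builds on SU beamforming, so enabling an MU role implies
 * enabling the corresponding SU role as well.
 */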
2783 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
2784 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2785
2786 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
2787 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2788
2789 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
2790 if (ret) {
2791 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
2792 value, ret);
2793 return ret;
2794 }
2795
2796 return 0;
2797 }
2798
2799 /* can be called only in mac80211 callbacks due to `key_count` usage */
2800 static void ath10k_bss_assoc(struct ieee80211_hw *hw,
2801 struct ieee80211_vif *vif,
2802 struct ieee80211_bss_conf *bss_conf)
2803 {
2804 struct ath10k *ar = hw->priv;
2805 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2806 struct ieee80211_sta_ht_cap ht_cap;
2807 struct ieee80211_sta_vht_cap vht_cap;
2808 struct wmi_peer_assoc_complete_arg peer_arg;
2809 struct ieee80211_sta *ap_sta;
2810 int ret;
2811
2812 lockdep_assert_held(&ar->conf_mutex);
2813
2814 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
2815 arvif->vdev_id, arvif->bssid, arvif->aid);
2816
2817 rcu_read_lock();
2818
2819 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
2820 if (!ap_sta) {
2821 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
2822 bss_conf->bssid, arvif->vdev_id);
2823 rcu_read_unlock();
2824 return;
2825 }
2826
2827 /* ap_sta must be accessed only within rcu section which must be left
2828 * before calling ath10k_setup_peer_smps() which might sleep.
2829 */
2830 ht_cap = ap_sta->ht_cap;
2831 vht_cap = ap_sta->vht_cap;
2832
2833 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
2834 if (ret) {
2835 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
2836 bss_conf->bssid, arvif->vdev_id, ret);
2837 rcu_read_unlock();
2838 return;
2839 }
2840
2841 rcu_read_unlock();
2842
2843 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2844 if (ret) {
2845 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
2846 bss_conf->bssid, arvif->vdev_id, ret);
2847 return;
2848 }
2849
2850 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
2851 if (ret) {
2852 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
2853 arvif->vdev_id, ret);
2854 return;
2855 }
2856
2857 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2858 if (ret) {
2859 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
2860 arvif->vdev_id, bss_conf->bssid, ret);
2861 return;
2862 }
2863
2864 ath10k_dbg(ar, ATH10K_DBG_MAC,
2865 "mac vdev %d up (associated) bssid %pM aid %d\n",
2866 arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
2867
2868 WARN_ON(arvif->is_up);
2869
2870 arvif->aid = bss_conf->aid;
2871 ether_addr_copy(arvif->bssid, bss_conf->bssid);
2872
2873 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
2874 if (ret) {
2875 ath10k_warn(ar, "failed to set vdev %d up: %d\n",
2876 arvif->vdev_id, ret);
2877 return;
2878 }
2879
2880 arvif->is_up = true;
2881
2882 /* Workaround: Some firmware revisions (tested with qca6174
2883 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be
2884 * poked with peer param command.
2885 */
2886 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
2887 WMI_PEER_DUMMY_VAR, 1);
2888 if (ret) {
2889 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
2890 arvif->bssid, arvif->vdev_id, ret);
2891 return;
2892 }
2893 }
2894
2895 static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
2896 struct ieee80211_vif *vif)
2897 {
2898 struct ath10k *ar = hw->priv;
2899 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2900 struct ieee80211_sta_vht_cap vht_cap = {};
2901 int ret;
2902
2903 lockdep_assert_held(&ar->conf_mutex);
2904
2905 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
2906 arvif->vdev_id, arvif->bssid);
2907
2908 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
2909 if (ret)
2910 ath10k_warn(ar, "failed to down vdev %i: %d\n",
2911 arvif->vdev_id, ret);
2912
2913 arvif->def_wep_key_idx = -1;
2914
2915 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2916 if (ret) {
2917 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
2918 arvif->vdev_id, ret);
2919 return;
2920 }
2921
2922 arvif->is_up = false;
2923
2924 cancel_delayed_work_sync(&arvif->connection_loss_work);
2925 }
2926
2927 static int ath10k_station_assoc(struct ath10k *ar,
2928 struct ieee80211_vif *vif,
2929 struct ieee80211_sta *sta,
2930 bool reassoc)
2931 {
2932 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2933 struct wmi_peer_assoc_complete_arg peer_arg;
2934 int ret = 0;
2935
2936 lockdep_assert_held(&ar->conf_mutex);
2937
2938 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
2939 if (ret) {
2940 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
2941 sta->addr, arvif->vdev_id, ret);
2942 return ret;
2943 }
2944
2945 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2946 if (ret) {
2947 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
2948 sta->addr, arvif->vdev_id, ret);
2949 return ret;
2950 }
2951
2952 /* Re-assoc is run only to update supported rates for a given station. It
2953 * doesn't make much sense to reconfigure the peer completely.
2954 */
2955 if (!reassoc) {
2956 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
2957 &sta->ht_cap);
2958 if (ret) {
2959 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
2960 arvif->vdev_id, ret);
2961 return ret;
2962 }
2963
2964 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
2965 if (ret) {
2966 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
2967 sta->addr, arvif->vdev_id, ret);
2968 return ret;
2969 }
2970
2971 if (!sta->wme) {
2972 arvif->num_legacy_stations++;
2973 ret = ath10k_recalc_rtscts_prot(arvif);
2974 if (ret) {
2975 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2976 arvif->vdev_id, ret);
2977 return ret;
2978 }
2979 }
2980
2981 /* Plumb cached keys only for static WEP */
2982 if ((arvif->def_wep_key_idx != -1) && (!sta->tdls)) {
2983 ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
2984 if (ret) {
2985 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
2986 arvif->vdev_id, ret);
2987 return ret;
2988 }
2989 }
2990 }
2991
2992 return ret;
2993 }
2994
2995 static int ath10k_station_disassoc(struct ath10k *ar,
2996 struct ieee80211_vif *vif,
2997 struct ieee80211_sta *sta)
2998 {
2999 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3000 int ret = 0;
3001
3002 lockdep_assert_held(&ar->conf_mutex);
3003
3004 if (!sta->wme) {
3005 arvif->num_legacy_stations--;
3006 ret = ath10k_recalc_rtscts_prot(arvif);
3007 if (ret) {
3008 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
3009 arvif->vdev_id, ret);
3010 return ret;
3011 }
3012 }
3013
3014 ret = ath10k_clear_peer_keys(arvif, sta->addr);
3015 if (ret) {
3016 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
3017 arvif->vdev_id, ret);
3018 return ret;
3019 }
3020
3021 return ret;
3022 }
3023
3024 /**************/
3025 /* Regulatory */
3026 /**************/
3027
3028 static int ath10k_update_channel_list(struct ath10k *ar)
3029 {
3030 struct ieee80211_hw *hw = ar->hw;
3031 struct ieee80211_supported_band **bands;
3032 enum nl80211_band band;
3033 struct ieee80211_channel *channel;
3034 struct wmi_scan_chan_list_arg arg = {0};
3035 struct wmi_channel_arg *ch;
3036 bool passive;
3037 int len;
3038 int ret;
3039 int i;
3040
3041 lockdep_assert_held(&ar->conf_mutex);
3042
3043 bands = hw->wiphy->bands;
3044 for (band = 0; band < NUM_NL80211_BANDS; band++) {
3045 if (!bands[band])
3046 continue;
3047
3048 for (i = 0; i < bands[band]->n_channels; i++) {
3049 if (bands[band]->channels[i].flags &
3050 IEEE80211_CHAN_DISABLED)
3051 continue;
3052
3053 arg.n_channels++;
3054 }
3055 }
3056
3057 len = sizeof(struct wmi_channel_arg) * arg.n_channels;
3058 arg.channels = kzalloc(len, GFP_KERNEL);
3059 if (!arg.channels)
3060 return -ENOMEM;
3061
3062 ch = arg.channels;
3063 for (band = 0; band < NUM_NL80211_BANDS; band++) {
3064 if (!bands[band])
3065 continue;
3066
3067 for (i = 0; i < bands[band]->n_channels; i++) {
3068 channel = &bands[band]->channels[i];
3069
3070 if (channel->flags & IEEE80211_CHAN_DISABLED)
3071 continue;
3072
3073 ch->allow_ht = true;
3074
3075 /* FIXME: when should we really allow VHT? */
3076 ch->allow_vht = true;
3077
3078 ch->allow_ibss =
3079 !(channel->flags & IEEE80211_CHAN_NO_IR);
3080
3081 ch->ht40plus =
3082 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
3083
3084 ch->chan_radar =
3085 !!(channel->flags & IEEE80211_CHAN_RADAR);
3086
3087 passive = channel->flags & IEEE80211_CHAN_NO_IR;
3088 ch->passive = passive;
3089
3090 /* the firmware is ignoring the "radar" flag of the
3091 * channel and is scanning actively using Probe Requests
3092 * on "Radar detection"/DFS channels which are not
3093 * marked as "available"
3094 */
3095 ch->passive |= ch->chan_radar;
3096
3097 ch->freq = channel->center_freq;
3098 ch->band_center_freq1 = channel->center_freq;
3099 ch->min_power = 0;
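/* mac80211 reports these power values in dBm; the doubling suggests the
 * target expects 0.5 dBm units (assumption inferred from the scaling here).
 */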
3100 ch->max_power = channel->max_power * 2;
3101 ch->max_reg_power = channel->max_reg_power * 2;
3102 ch->max_antenna_gain = channel->max_antenna_gain * 2;
3103 ch->reg_class_id = 0; /* FIXME */
3104
3105 /* FIXME: why use only legacy modes, why not any
3106 * HT/VHT modes? Would that even make any
3107 * difference?
3108 */
3109 if (channel->band == NL80211_BAND_2GHZ)
3110 ch->mode = MODE_11G;
3111 else
3112 ch->mode = MODE_11A;
3113
3114 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
3115 continue;
3116
3117 ath10k_dbg(ar, ATH10K_DBG_WMI,
3118 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
3119 ch - arg.channels, arg.n_channels,
3120 ch->freq, ch->max_power, ch->max_reg_power,
3121 ch->max_antenna_gain, ch->mode);
3122
3123 ch++;
3124 }
3125 }
3126
3127 ret = ath10k_wmi_scan_chan_list(ar, &arg);
3128 kfree(arg.channels);
3129
3130 return ret;
3131 }
3132
3133 static enum wmi_dfs_region
3134 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
3135 {
3136 switch (dfs_region) {
3137 case NL80211_DFS_UNSET:
3138 return WMI_UNINIT_DFS_DOMAIN;
3139 case NL80211_DFS_FCC:
3140 return WMI_FCC_DFS_DOMAIN;
3141 case NL80211_DFS_ETSI:
3142 return WMI_ETSI_DFS_DOMAIN;
3143 case NL80211_DFS_JP:
3144 return WMI_MKK4_DFS_DOMAIN;
3145 }
3146 return WMI_UNINIT_DFS_DOMAIN;
3147 }
3148
3149 static void ath10k_regd_update(struct ath10k *ar)
3150 {
3151 struct reg_dmn_pair_mapping *regpair;
3152 int ret;
3153 enum wmi_dfs_region wmi_dfs_reg;
3154 enum nl80211_dfs_regions nl_dfs_reg;
3155
3156 lockdep_assert_held(&ar->conf_mutex);
3157
3158 ret = ath10k_update_channel_list(ar);
3159 if (ret)
3160 ath10k_warn(ar, "failed to update channel list: %d\n", ret);
3161
3162 regpair = ar->ath_common.regulatory.regpair;
3163
3164 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3165 nl_dfs_reg = ar->dfs_detector->region;
3166 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
3167 } else {
3168 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
3169 }
3170
3171 /* Target allows setting up per-band regdomain but ath_common provides
3172 * a combined one only
3173 */
3174 ret = ath10k_wmi_pdev_set_regdomain(ar,
3175 regpair->reg_domain,
3176 regpair->reg_domain, /* 2ghz */
3177 regpair->reg_domain, /* 5ghz */
3178 regpair->reg_2ghz_ctl,
3179 regpair->reg_5ghz_ctl,
3180 wmi_dfs_reg);
3181 if (ret)
3182 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
3183 }
3184
3185 static void ath10k_mac_update_channel_list(struct ath10k *ar,
3186 struct ieee80211_supported_band *band)
3187 {
3188 int i;
3189
3190 if (ar->low_5ghz_chan && ar->high_5ghz_chan) {
3191 for (i = 0; i < band->n_channels; i++) {
3192 if (band->channels[i].center_freq < ar->low_5ghz_chan ||
3193 band->channels[i].center_freq > ar->high_5ghz_chan)
3194 band->channels[i].flags |=
3195 IEEE80211_CHAN_DISABLED;
3196 }
3197 }
3198 }
3199
3200 static void ath10k_reg_notifier(struct wiphy *wiphy,
3201 struct regulatory_request *request)
3202 {
3203 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
3204 struct ath10k *ar = hw->priv;
3205 bool result;
3206
3207 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
3208
3209 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3210 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
3211 request->dfs_region);
3212 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
3213 request->dfs_region);
3214 if (!result)
3215 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
3216 request->dfs_region);
3217 }
3218
3219 mutex_lock(&ar->conf_mutex);
3220 if (ar->state == ATH10K_STATE_ON)
3221 ath10k_regd_update(ar);
3222 mutex_unlock(&ar->conf_mutex);
3223
3224 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
3225 ath10k_mac_update_channel_list(ar,
3226 ar->hw->wiphy->bands[NL80211_BAND_5GHZ]);
3227 }
3228
3229 static void ath10k_stop_radar_confirmation(struct ath10k *ar)
3230 {
3231 spin_lock_bh(&ar->data_lock);
3232 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_STOPPED;
3233 spin_unlock_bh(&ar->data_lock);
3234
3235 cancel_work_sync(&ar->radar_confirmation_work);
3236 }
3237
3238 /***************/
3239 /* TX handlers */
3240 /***************/
3241
3242 enum ath10k_mac_tx_path {
3243 ATH10K_MAC_TX_HTT,
3244 ATH10K_MAC_TX_HTT_MGMT,
3245 ATH10K_MAC_TX_WMI_MGMT,
3246 ATH10K_MAC_TX_UNKNOWN,
3247 };
3248
3249 void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
3250 {
3251 lockdep_assert_held(&ar->htt.tx_lock);
3252
3253 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3254 ar->tx_paused |= BIT(reason);
3255 ieee80211_stop_queues(ar->hw);
3256 }
3257
3258 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
3259 struct ieee80211_vif *vif)
3260 {
3261 struct ath10k *ar = data;
3262 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3263
3264 if (arvif->tx_paused)
3265 return;
3266
3267 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3268 }
3269
3270 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
3271 {
3272 lockdep_assert_held(&ar->htt.tx_lock);
3273
3274 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3275 ar->tx_paused &= ~BIT(reason);
3276
3277 if (ar->tx_paused)
3278 return;
3279
3280 ieee80211_iterate_active_interfaces_atomic(ar->hw,
3281 IEEE80211_IFACE_ITER_RESUME_ALL,
3282 ath10k_mac_tx_unlock_iter,
3283 ar);
3284
3285 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
3286 }
3287
3288 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
3289 {
3290 struct ath10k *ar = arvif->ar;
3291
3292 lockdep_assert_held(&ar->htt.tx_lock);
3293
3294 WARN_ON(reason >= BITS_PER_LONG);
3295 arvif->tx_paused |= BIT(reason);
3296 ieee80211_stop_queue(ar->hw, arvif->vdev_id);
3297 }
3298
3299 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
3300 {
3301 struct ath10k *ar = arvif->ar;
3302
3303 lockdep_assert_held(&ar->htt.tx_lock);
3304
3305 WARN_ON(reason >= BITS_PER_LONG);
3306 arvif->tx_paused &= ~BIT(reason);
3307
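/* Wake the per-vif queue only when both the global and the per-vif pause
 * bitmaps are clear.
 */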
3308 if (ar->tx_paused)
3309 return;
3310
3311 if (arvif->tx_paused)
3312 return;
3313
3314 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3315 }
3316
3317 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
3318 enum wmi_tlv_tx_pause_id pause_id,
3319 enum wmi_tlv_tx_pause_action action)
3320 {
3321 struct ath10k *ar = arvif->ar;
3322
3323 lockdep_assert_held(&ar->htt.tx_lock);
3324
3325 switch (action) {
3326 case WMI_TLV_TX_PAUSE_ACTION_STOP:
3327 ath10k_mac_vif_tx_lock(arvif, pause_id);
3328 break;
3329 case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3330 ath10k_mac_vif_tx_unlock(arvif, pause_id);
3331 break;
3332 default:
3333 ath10k_dbg(ar, ATH10K_DBG_BOOT,
3334 "received unknown tx pause action %d on vdev %i, ignoring\n",
3335 action, arvif->vdev_id);
3336 break;
3337 }
3338 }
3339
3340 struct ath10k_mac_tx_pause {
3341 u32 vdev_id;
3342 enum wmi_tlv_tx_pause_id pause_id;
3343 enum wmi_tlv_tx_pause_action action;
3344 };
3345
3346 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
3347 struct ieee80211_vif *vif)
3348 {
3349 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3350 struct ath10k_mac_tx_pause *arg = data;
3351
3352 if (arvif->vdev_id != arg->vdev_id)
3353 return;
3354
3355 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
3356 }
3357
3358 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
3359 enum wmi_tlv_tx_pause_id pause_id,
3360 enum wmi_tlv_tx_pause_action action)
3361 {
3362 struct ath10k_mac_tx_pause arg = {
3363 .vdev_id = vdev_id,
3364 .pause_id = pause_id,
3365 .action = action,
3366 };
3367
3368 spin_lock_bh(&ar->htt.tx_lock);
3369 ieee80211_iterate_active_interfaces_atomic(ar->hw,
3370 IEEE80211_IFACE_ITER_RESUME_ALL,
3371 ath10k_mac_handle_tx_pause_iter,
3372 &arg);
3373 spin_unlock_bh(&ar->htt.tx_lock);
3374 }
3375
3376 static enum ath10k_hw_txrx_mode
3377 ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
3378 struct ieee80211_vif *vif,
3379 struct ieee80211_sta *sta,
3380 struct sk_buff *skb)
3381 {
3382 const struct ieee80211_hdr *hdr = (void *)skb->data;
3383 __le16 fc = hdr->frame_control;
3384
3385 if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
3386 return ATH10K_HW_TXRX_RAW;
3387
3388 if (ieee80211_is_mgmt(fc))
3389 return ATH10K_HW_TXRX_MGMT;
3390
3391 /* Workaround:
3392 *
3393 * NullFunc frames are mostly used to check whether a client or AP is
3394 * still reachable and responsive. This implies tx status reports must be
3395 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can
3396 * wrongly conclude that the other end disappeared and tear down the
3397 * BSS connection, or never disconnect from the BSS/client (which is
3398 * the case).
3399 *
3400 * Firmware with HTT older than 3.0 delivers incorrect tx status for
3401 * NullFunc frames to driver. However there's a HTT Mgmt Tx command
3402 * which seems to deliver correct tx reports for NullFunc frames. The
3403 * downside of using it is it ignores client powersave state so it can
3404 * end up disconnecting sleeping clients in AP mode. STA mode should be
3405 * fine though because APs don't sleep.
3406 */
3407 if (ar->htt.target_version_major < 3 &&
3408 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
3409 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3410 ar->running_fw->fw_file.fw_features))
3411 return ATH10K_HW_TXRX_MGMT;
3412
3413 /* Workaround:
3414 *
3415 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
3416 * NativeWifi txmode - it selects AP key instead of peer key. It seems
3417 * to work with Ethernet txmode so use it.
3418 *
3419 * FIXME: Check if raw mode works with TDLS.
3420 */
3421 if (ieee80211_is_data_present(fc) && sta && sta->tdls)
3422 return ATH10K_HW_TXRX_ETHERNET;
3423
3424 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
3425 return ATH10K_HW_TXRX_RAW;
3426
3427 return ATH10K_HW_TXRX_NATIVE_WIFI;
3428 }
3429
3430 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
3431 struct sk_buff *skb)
3432 {
3433 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3434 const struct ieee80211_hdr *hdr = (void *)skb->data;
3435 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
3436 IEEE80211_TX_CTL_INJECTED;
3437
3438 if (!ieee80211_has_protected(hdr->frame_control))
3439 return false;
3440
3441 if ((info->flags & mask) == mask)
3442 return false;
3443
3444 if (vif)
3445 return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt;
3446
3447 return true;
3448 }
3449
3450 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
3451 * Control in the header.
3452 */
3453 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
3454 {
3455 struct ieee80211_hdr *hdr = (void *)skb->data;
3456 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3457 u8 *qos_ctl;
3458
3459 if (!ieee80211_is_data_qos(hdr->frame_control))
3460 return;
3461
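/* Strip the 2-byte QoS Control field: shift the preceding header bytes
 * forward over it and pull the duplicated bytes off the head of the skb.
 */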
3462 qos_ctl = ieee80211_get_qos_ctl(hdr);
3463 memmove(skb->data + IEEE80211_QOS_CTL_LEN,
3464 skb->data, (void *)qos_ctl - (void *)skb->data);
3465 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
3466
3467 /* Some firmware revisions don't handle sending QoS NullFunc well.
3468 * These frames are mainly used for CQM purposes so it doesn't really
3469 * matter whether QoS NullFunc or NullFunc are sent.
3470 */
3471 hdr = (void *)skb->data;
3472 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
3473 cb->flags &= ~ATH10K_SKB_F_QOS;
3474
3475 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
3476 }
3477
3478 static void ath10k_tx_h_8023(struct sk_buff *skb)
3479 {
3480 struct ieee80211_hdr *hdr;
3481 struct rfc1042_hdr *rfc1042;
3482 struct ethhdr *eth;
3483 size_t hdrlen;
3484 u8 da[ETH_ALEN];
3485 u8 sa[ETH_ALEN];
3486 __be16 type;
3487
3488 hdr = (void *)skb->data;
3489 hdrlen = ieee80211_hdrlen(hdr->frame_control);
3490 rfc1042 = (void *)skb->data + hdrlen;
3491
3492 ether_addr_copy(da, ieee80211_get_DA(hdr));
3493 ether_addr_copy(sa, ieee80211_get_SA(hdr));
3494 type = rfc1042->snap_type;
3495
3496 skb_pull(skb, hdrlen + sizeof(*rfc1042));
3497 skb_push(skb, sizeof(*eth));
3498
3499 eth = (void *)skb->data;
3500 ether_addr_copy(eth->h_dest, da);
3501 ether_addr_copy(eth->h_source, sa);
3502 eth->h_proto = type;
3503 }
3504
3505 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
3506 struct ieee80211_vif *vif,
3507 struct sk_buff *skb)
3508 {
3509 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3510 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3511
3512 /* This case applies only to P2P GO */
3513 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
3514 return;
3515
3516 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
3517 spin_lock_bh(&ar->data_lock);
3518 if (arvif->u.ap.noa_data)
3519 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
3520 GFP_ATOMIC))
3521 skb_put_data(skb, arvif->u.ap.noa_data,
3522 arvif->u.ap.noa_len);
3523 spin_unlock_bh(&ar->data_lock);
3524 }
3525 }
3526
3527 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
3528 struct ieee80211_vif *vif,
3529 struct ieee80211_txq *txq,
3530 struct sk_buff *skb)
3531 {
3532 struct ieee80211_hdr *hdr = (void *)skb->data;
3533 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3534
3535 cb->flags = 0;
3536 if (!ath10k_tx_h_use_hwcrypto(vif, skb))
3537 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3538
3539 if (ieee80211_is_mgmt(hdr->frame_control))
3540 cb->flags |= ATH10K_SKB_F_MGMT;
3541
3542 if (ieee80211_is_data_qos(hdr->frame_control))
3543 cb->flags |= ATH10K_SKB_F_QOS;
3544
3545 cb->vif = vif;
3546 cb->txq = txq;
3547 }
3548
3549 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
3550 {
3551 /* FIXME: Not really sure since when the behaviour changed. At some
3552 * point new firmware stopped requiring creation of peer entries for
3553 * offchannel tx (and actually creating them causes issues with wmi-htc
3554 * tx credit replenishment and reliability). Assuming it's at least 3.4
3555 * because that's when the `freq` was introduced to TX_FRM HTT command.
3556 */
3557 return (ar->htt.target_version_major >= 3 &&
3558 ar->htt.target_version_minor >= 4 &&
3559 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
3560 }
3561
3562 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
3563 {
3564 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
3565 int ret = 0;
3566
3567 spin_lock_bh(&ar->data_lock);
3568
3569 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
3570 ath10k_warn(ar, "wmi mgmt tx queue is full\n");
3571 ret = -ENOSPC;
3572 goto unlock;
3573 }
3574
3575 __skb_queue_tail(q, skb);
3576 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
3577
3578 unlock:
3579 spin_unlock_bh(&ar->data_lock);
3580
3581 return ret;
3582 }
3583
3584 static enum ath10k_mac_tx_path
3585 ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
3586 struct sk_buff *skb,
3587 enum ath10k_hw_txrx_mode txmode)
3588 {
3589 switch (txmode) {
3590 case ATH10K_HW_TXRX_RAW:
3591 case ATH10K_HW_TXRX_NATIVE_WIFI:
3592 case ATH10K_HW_TXRX_ETHERNET:
3593 return ATH10K_MAC_TX_HTT;
3594 case ATH10K_HW_TXRX_MGMT:
3595 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3596 ar->running_fw->fw_file.fw_features) ||
3597 test_bit(WMI_SERVICE_MGMT_TX_WMI,
3598 ar->wmi.svc_map))
3599 return ATH10K_MAC_TX_WMI_MGMT;
3600 else if (ar->htt.target_version_major >= 3)
3601 return ATH10K_MAC_TX_HTT;
3602 else
3603 return ATH10K_MAC_TX_HTT_MGMT;
3604 }
3605
3606 return ATH10K_MAC_TX_UNKNOWN;
3607 }
3608
3609 static int ath10k_mac_tx_submit(struct ath10k *ar,
3610 enum ath10k_hw_txrx_mode txmode,
3611 enum ath10k_mac_tx_path txpath,
3612 struct sk_buff *skb)
3613 {
3614 struct ath10k_htt *htt = &ar->htt;
3615 int ret = -EINVAL;
3616
3617 switch (txpath) {
3618 case ATH10K_MAC_TX_HTT:
3619 ret = ath10k_htt_tx(htt, txmode, skb);
3620 break;
3621 case ATH10K_MAC_TX_HTT_MGMT:
3622 ret = ath10k_htt_mgmt_tx(htt, skb);
3623 break;
3624 case ATH10K_MAC_TX_WMI_MGMT:
3625 ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
3626 break;
3627 case ATH10K_MAC_TX_UNKNOWN:
3628 WARN_ON_ONCE(1);
3629 ret = -EINVAL;
3630 break;
3631 }
3632
3633 if (ret) {
3634 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
3635 ret);
3636 ieee80211_free_txskb(ar->hw, skb);
3637 }
3638
3639 return ret;
3640 }
3641
3642 /* This function consumes the sk_buff regardless of return value as far as
3643 * caller is concerned so no freeing is necessary afterwards.
3644 */
3645 static int ath10k_mac_tx(struct ath10k *ar,
3646 struct ieee80211_vif *vif,
3647 enum ath10k_hw_txrx_mode txmode,
3648 enum ath10k_mac_tx_path txpath,
3649 struct sk_buff *skb)
3650 {
3651 struct ieee80211_hw *hw = ar->hw;
3652 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3653 int ret;
3654
3655 /* We should disable CCK RATE due to P2P */
3656 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
3657 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
3658
3659 switch (txmode) {
3660 case ATH10K_HW_TXRX_MGMT:
3661 case ATH10K_HW_TXRX_NATIVE_WIFI:
3662 ath10k_tx_h_nwifi(hw, skb);
3663 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
3664 ath10k_tx_h_seq_no(vif, skb);
3665 break;
3666 case ATH10K_HW_TXRX_ETHERNET:
3667 ath10k_tx_h_8023(skb);
3668 break;
3669 case ATH10K_HW_TXRX_RAW:
3670 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
3671 WARN_ON_ONCE(1);
3672 ieee80211_free_txskb(hw, skb);
3673 return -ENOTSUPP;
3674 }
3675 }
3676
3677 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
3678 if (!ath10k_mac_tx_frm_has_freq(ar)) {
3679 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
3680 skb);
3681
3682 skb_queue_tail(&ar->offchan_tx_queue, skb);
3683 ieee80211_queue_work(hw, &ar->offchan_tx_work);
3684 return 0;
3685 }
3686 }
3687
3688 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
3689 if (ret) {
3690 ath10k_warn(ar, "failed to submit frame: %d\n", ret);
3691 return ret;
3692 }
3693
3694 return 0;
3695 }
3696
3697 void ath10k_offchan_tx_purge(struct ath10k *ar)
3698 {
3699 struct sk_buff *skb;
3700
3701 for (;;) {
3702 skb = skb_dequeue(&ar->offchan_tx_queue);
3703 if (!skb)
3704 break;
3705
3706 ieee80211_free_txskb(ar->hw, skb);
3707 }
3708 }
3709
3710 void ath10k_offchan_tx_work(struct work_struct *work)
3711 {
3712 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
3713 struct ath10k_peer *peer;
3714 struct ath10k_vif *arvif;
3715 enum ath10k_hw_txrx_mode txmode;
3716 enum ath10k_mac_tx_path txpath;
3717 struct ieee80211_hdr *hdr;
3718 struct ieee80211_vif *vif;
3719 struct ieee80211_sta *sta;
3720 struct sk_buff *skb;
3721 const u8 *peer_addr;
3722 int vdev_id;
3723 int ret;
3724 unsigned long time_left;
3725 bool tmp_peer_created = false;
3726
3727 /* FW requirement: We must create a peer before FW will send out
3728 * an offchannel frame. Otherwise the frame will be stuck and
3729 * never transmitted. We delete the peer upon tx completion.
3730 * It is unlikely that a peer for offchannel tx will already be
3731 * present. However it may be in some rare cases so account for that.
3732 * Otherwise we might remove a legitimate peer and break stuff.
3733 */
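/* In short: each queued frame is dequeued, a temporary peer for its
 * destination is created if none exists yet, the frame is sent through
 * the normal tx path, and the worker waits up to three seconds for the
 * tx completion before deleting any peer it created here.
 */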
3734
3735 for (;;) {
3736 skb = skb_dequeue(&ar->offchan_tx_queue);
3737 if (!skb)
3738 break;
3739
3740 mutex_lock(&ar->conf_mutex);
3741
3742 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
3743 skb);
3744
3745 hdr = (struct ieee80211_hdr *)skb->data;
3746 peer_addr = ieee80211_get_DA(hdr);
3747
3748 spin_lock_bh(&ar->data_lock);
3749 vdev_id = ar->scan.vdev_id;
3750 peer = ath10k_peer_find(ar, vdev_id, peer_addr);
3751 spin_unlock_bh(&ar->data_lock);
3752
3753 if (peer)
3754 /* FIXME: should this use ath10k_warn()? */
3755 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
3756 peer_addr, vdev_id);
3757
3758 if (!peer) {
3759 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
3760 peer_addr,
3761 WMI_PEER_TYPE_DEFAULT);
3762 if (ret)
3763 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
3764 peer_addr, vdev_id, ret);
3765 tmp_peer_created = (ret == 0);
3766 }
3767
3768 spin_lock_bh(&ar->data_lock);
3769 reinit_completion(&ar->offchan_tx_completed);
3770 ar->offchan_tx_skb = skb;
3771 spin_unlock_bh(&ar->data_lock);
3772
3773 /* It's safe to access vif and sta - conf_mutex guarantees that
3774 * sta_state() and remove_interface() are exclusively locked out
3775 * with respect to this offchannel worker.
3776 */
3777 arvif = ath10k_get_arvif(ar, vdev_id);
3778 if (arvif) {
3779 vif = arvif->vif;
3780 sta = ieee80211_find_sta(vif, peer_addr);
3781 } else {
3782 vif = NULL;
3783 sta = NULL;
3784 }
3785
3786 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3787 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3788
3789 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
3790 if (ret) {
3791 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
3792 ret);
3793 /* not serious */
3794 }
3795
3796 time_left =
3797 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
3798 if (time_left == 0)
3799 ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
3800 skb);
3801
3802 if (!peer && tmp_peer_created) {
3803 ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
3804 if (ret)
3805 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
3806 peer_addr, vdev_id, ret);
3807 }
3808
3809 mutex_unlock(&ar->conf_mutex);
3810 }
3811 }
3812
3813 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
3814 {
3815 struct sk_buff *skb;
3816
3817 for (;;) {
3818 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3819 if (!skb)
3820 break;
3821
3822 ieee80211_free_txskb(ar->hw, skb);
3823 }
3824 }
3825
3826 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
3827 {
3828 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
3829 struct sk_buff *skb;
3830 dma_addr_t paddr;
3831 int ret;
3832
3833 for (;;) {
3834 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3835 if (!skb)
3836 break;
3837
3838 if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
3839 ar->running_fw->fw_file.fw_features)) {
3840 paddr = dma_map_single(ar->dev, skb->data,
3841 skb->len, DMA_TO_DEVICE);
3842 if (dma_mapping_error(ar->dev, paddr))
3843 continue;
3844 ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
3845 if (ret) {
3846 ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
3847 ret);
3848 dma_unmap_single(ar->dev, paddr, skb->len,
3849 DMA_TO_DEVICE);
3850 ieee80211_free_txskb(ar->hw, skb);
3851 }
3852 } else {
3853 ret = ath10k_wmi_mgmt_tx(ar, skb);
3854 if (ret) {
3855 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
3856 ret);
3857 ieee80211_free_txskb(ar->hw, skb);
3858 }
3859 }
3860 }
3861 }
3862
3863 static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
3864 {
3865 struct ath10k_txq *artxq;
3866
3867 if (!txq)
3868 return;
3869
3870 artxq = (void *)txq->drv_priv;
3871 INIT_LIST_HEAD(&artxq->list);
3872 }
3873
3874 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
3875 {
3876 struct ath10k_txq *artxq;
3877 struct ath10k_skb_cb *cb;
3878 struct sk_buff *msdu;
3879 int msdu_id;
3880
3881 if (!txq)
3882 return;
3883
3884 artxq = (void *)txq->drv_priv;
3885 spin_lock_bh(&ar->txqs_lock);
3886 if (!list_empty(&artxq->list))
3887 list_del_init(&artxq->list);
3888 spin_unlock_bh(&ar->txqs_lock);
3889
3890 spin_lock_bh(&ar->htt.tx_lock);
3891 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
3892 cb = ATH10K_SKB_CB(msdu);
3893 if (cb->txq == txq)
3894 cb->txq = NULL;
3895 }
3896 spin_unlock_bh(&ar->htt.tx_lock);
3897 }
3898
3899 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
3900 u16 peer_id,
3901 u8 tid)
3902 {
3903 struct ath10k_peer *peer;
3904
3905 lockdep_assert_held(&ar->data_lock);
3906
3907 peer = ar->peer_map[peer_id];
3908 if (!peer)
3909 return NULL;
3910
3911 if (peer->removed)
3912 return NULL;
3913
3914 if (peer->sta)
3915 return peer->sta->txq[tid];
3916 else if (peer->vif)
3917 return peer->vif->txq;
3918 else
3919 return NULL;
3920 }
3921
3922 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
3923 struct ieee80211_txq *txq)
3924 {
3925 struct ath10k *ar = hw->priv;
3926 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3927
3928 /* No need to get locks */
3929
3930 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
3931 return true;
3932
3933 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
3934 return true;
3935
3936 if (artxq->num_fw_queued < artxq->num_push_allowed)
3937 return true;
3938
3939 return false;
3940 }
3941
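/* Note on the checks below: pushing is always allowed while the
 * firmware is in plain push mode; otherwise it is allowed when either
 * the global number of pending HTT frames or this queue's
 * firmware-queued count is still below its respective push allowance.
 */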
3942 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3943 struct ieee80211_txq *txq)
3944 {
3945 struct ath10k *ar = hw->priv;
3946 struct ath10k_htt *htt = &ar->htt;
3947 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3948 struct ieee80211_vif *vif = txq->vif;
3949 struct ieee80211_sta *sta = txq->sta;
3950 enum ath10k_hw_txrx_mode txmode;
3951 enum ath10k_mac_tx_path txpath;
3952 struct sk_buff *skb;
3953 struct ieee80211_hdr *hdr;
3954 size_t skb_len;
3955 bool is_mgmt, is_presp;
3956 int ret;
3957
3958 spin_lock_bh(&ar->htt.tx_lock);
3959 ret = ath10k_htt_tx_inc_pending(htt);
3960 spin_unlock_bh(&ar->htt.tx_lock);
3961
3962 if (ret)
3963 return ret;
3964
3965 skb = ieee80211_tx_dequeue(hw, txq);
3966 if (!skb) {
3967 spin_lock_bh(&ar->htt.tx_lock);
3968 ath10k_htt_tx_dec_pending(htt);
3969 spin_unlock_bh(&ar->htt.tx_lock);
3970
3971 return -ENOENT;
3972 }
3973
3974 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
3975
3976 skb_len = skb->len;
3977 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3978 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3979 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
3980
3981 if (is_mgmt) {
3982 hdr = (struct ieee80211_hdr *)skb->data;
3983 is_presp = ieee80211_is_probe_resp(hdr->frame_control);
3984
3985 spin_lock_bh(&ar->htt.tx_lock);
3986 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
3987
3988 if (ret) {
3989 ath10k_htt_tx_dec_pending(htt);
3990 spin_unlock_bh(&ar->htt.tx_lock);
3991 return ret;
3992 }
3993 spin_unlock_bh(&ar->htt.tx_lock);
3994 }
3995
3996 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
3997 if (unlikely(ret)) {
3998 ath10k_warn(ar, "failed to push frame: %d\n", ret);
3999
4000 spin_lock_bh(&ar->htt.tx_lock);
4001 ath10k_htt_tx_dec_pending(htt);
4002 if (is_mgmt)
4003 ath10k_htt_tx_mgmt_dec_pending(htt);
4004 spin_unlock_bh(&ar->htt.tx_lock);
4005
4006 return ret;
4007 }
4008
4009 spin_lock_bh(&ar->htt.tx_lock);
4010 artxq->num_fw_queued++;
4011 spin_unlock_bh(&ar->htt.tx_lock);
4012
4013 return skb_len;
4014 }
4015
4016 void ath10k_mac_tx_push_pending(struct ath10k *ar)
4017 {
4018 struct ieee80211_hw *hw = ar->hw;
4019 struct ieee80211_txq *txq;
4020 struct ath10k_txq *artxq;
4021 struct ath10k_txq *last;
4022 int ret;
4023 int max;
4024
4025 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
4026 return;
4027
4028 spin_lock_bh(&ar->txqs_lock);
4029 rcu_read_lock();
4030
4031 last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
4032 while (!list_empty(&ar->txqs)) {
4033 artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
4034 txq = container_of((void *)artxq, struct ieee80211_txq,
4035 drv_priv);
4036
4037 /* Prevent aggressive sta/tid taking over tx queue */
4038 max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
4039 ret = 0;
4040 while (ath10k_mac_tx_can_push(hw, txq) && max--) {
4041 ret = ath10k_mac_tx_push_txq(hw, txq);
4042 if (ret < 0)
4043 break;
4044 }
4045
4046 list_del_init(&artxq->list);
4047 if (ret != -ENOENT)
4048 list_add_tail(&artxq->list, &ar->txqs);
4049
4050 ath10k_htt_tx_txq_update(hw, txq);
4051
4052 if (artxq == last || (ret < 0 && ret != -ENOENT))
4053 break;
4054 }
4055
4056 rcu_read_unlock();
4057 spin_unlock_bh(&ar->txqs_lock);
4058 }
4059 EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
4060
4061 /************/
4062 /* Scanning */
4063 /************/
4064
4065 void __ath10k_scan_finish(struct ath10k *ar)
4066 {
4067 lockdep_assert_held(&ar->data_lock);
4068
4069 switch (ar->scan.state) {
4070 case ATH10K_SCAN_IDLE:
4071 break;
4072 case ATH10K_SCAN_RUNNING:
4073 case ATH10K_SCAN_ABORTING:
4074 if (!ar->scan.is_roc) {
4075 struct cfg80211_scan_info info = {
4076 .aborted = (ar->scan.state ==
4077 ATH10K_SCAN_ABORTING),
4078 };
4079
4080 ieee80211_scan_completed(ar->hw, &info);
4081 } else if (ar->scan.roc_notify) {
4082 ieee80211_remain_on_channel_expired(ar->hw);
4083 }
4084 /* fall through */
4085 case ATH10K_SCAN_STARTING:
4086 ar->scan.state = ATH10K_SCAN_IDLE;
4087 ar->scan_channel = NULL;
4088 ar->scan.roc_freq = 0;
4089 ath10k_offchan_tx_purge(ar);
4090 cancel_delayed_work(&ar->scan.timeout);
4091 complete(&ar->scan.completed);
4092 break;
4093 }
4094 }
4095
4096 void ath10k_scan_finish(struct ath10k *ar)
4097 {
4098 spin_lock_bh(&ar->data_lock);
4099 __ath10k_scan_finish(ar);
4100 spin_unlock_bh(&ar->data_lock);
4101 }
4102
4103 static int ath10k_scan_stop(struct ath10k *ar)
4104 {
4105 struct wmi_stop_scan_arg arg = {
4106 .req_id = 1, /* FIXME */
4107 .req_type = WMI_SCAN_STOP_ONE,
4108 .u.scan_id = ATH10K_SCAN_ID,
4109 };
4110 int ret;
4111
4112 lockdep_assert_held(&ar->conf_mutex);
4113
4114 ret = ath10k_wmi_stop_scan(ar, &arg);
4115 if (ret) {
4116 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
4117 goto out;
4118 }
4119
4120 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
4121 if (ret == 0) {
4122 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
4123 ret = -ETIMEDOUT;
4124 } else if (ret > 0) {
4125 ret = 0;
4126 }
4127
4128 out:
4129 /* Scan state should be updated upon scan completion but in case
4130 * firmware fails to deliver the event (for whatever reason) it is
4131 * desired to clean up scan state anyway. Firmware may have just
4132 * dropped the scan completion event delivery due to transport pipe
4133 * being overrun with data and/or it can recover on its own before the
4134 * next scan request is submitted.
4135 */
4136 spin_lock_bh(&ar->data_lock);
4137 if (ar->scan.state != ATH10K_SCAN_IDLE)
4138 __ath10k_scan_finish(ar);
4139 spin_unlock_bh(&ar->data_lock);
4140
4141 return ret;
4142 }
4143
4144 static void ath10k_scan_abort(struct ath10k *ar)
4145 {
4146 int ret;
4147
4148 lockdep_assert_held(&ar->conf_mutex);
4149
4150 spin_lock_bh(&ar->data_lock);
4151
4152 switch (ar->scan.state) {
4153 case ATH10K_SCAN_IDLE:
4154 /* This can happen if timeout worker kicked in and called
4155 * abortion while scan completion was being processed.
4156 */
4157 break;
4158 case ATH10K_SCAN_STARTING:
4159 case ATH10K_SCAN_ABORTING:
4160 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
4161 ath10k_scan_state_str(ar->scan.state),
4162 ar->scan.state);
4163 break;
4164 case ATH10K_SCAN_RUNNING:
4165 ar->scan.state = ATH10K_SCAN_ABORTING;
4166 spin_unlock_bh(&ar->data_lock);
4167
4168 ret = ath10k_scan_stop(ar);
4169 if (ret)
4170 ath10k_warn(ar, "failed to abort scan: %d\n", ret);
4171
4172 spin_lock_bh(&ar->data_lock);
4173 break;
4174 }
4175
4176 spin_unlock_bh(&ar->data_lock);
4177 }
4178
4179 void ath10k_scan_timeout_work(struct work_struct *work)
4180 {
4181 struct ath10k *ar = container_of(work, struct ath10k,
4182 scan.timeout.work);
4183
4184 mutex_lock(&ar->conf_mutex);
4185 ath10k_scan_abort(ar);
4186 mutex_unlock(&ar->conf_mutex);
4187 }
4188
4189 static int ath10k_start_scan(struct ath10k *ar,
4190 const struct wmi_start_scan_arg *arg)
4191 {
4192 int ret;
4193
4194 lockdep_assert_held(&ar->conf_mutex);
4195
4196 ret = ath10k_wmi_start_scan(ar, arg);
4197 if (ret)
4198 return ret;
4199
4200 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
4201 if (ret == 0) {
4202 ret = ath10k_scan_stop(ar);
4203 if (ret)
4204 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
4205
4206 return -ETIMEDOUT;
4207 }
4208
4209 /* If we failed to start the scan, return error code at
4210 * this point. This is probably due to some issue in the
4211 * firmware, but no need to wedge the driver due to that...
4212 */
4213 spin_lock_bh(&ar->data_lock);
4214 if (ar->scan.state == ATH10K_SCAN_IDLE) {
4215 spin_unlock_bh(&ar->data_lock);
4216 return -EINVAL;
4217 }
4218 spin_unlock_bh(&ar->data_lock);
4219
4220 return 0;
4221 }
4222
4223 /**********************/
4224 /* mac80211 callbacks */
4225 /**********************/
4226
4227 static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
4228 struct ieee80211_tx_control *control,
4229 struct sk_buff *skb)
4230 {
4231 struct ath10k *ar = hw->priv;
4232 struct ath10k_htt *htt = &ar->htt;
4233 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4234 struct ieee80211_vif *vif = info->control.vif;
4235 struct ieee80211_sta *sta = control->sta;
4236 struct ieee80211_txq *txq = NULL;
4237 struct ieee80211_hdr *hdr = (void *)skb->data;
4238 enum ath10k_hw_txrx_mode txmode;
4239 enum ath10k_mac_tx_path txpath;
4240 bool is_htt;
4241 bool is_mgmt;
4242 bool is_presp;
4243 int ret;
4244
4245 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
4246
4247 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
4248 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4249 is_htt = (txpath == ATH10K_MAC_TX_HTT ||
4250 txpath == ATH10K_MAC_TX_HTT_MGMT);
4251 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
4252
4253 if (is_htt) {
4254 spin_lock_bh(&ar->htt.tx_lock);
4255 is_presp = ieee80211_is_probe_resp(hdr->frame_control);
4256
4257 ret = ath10k_htt_tx_inc_pending(htt);
4258 if (ret) {
4259 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
4260 ret);
4261 spin_unlock_bh(&ar->htt.tx_lock);
4262 ieee80211_free_txskb(ar->hw, skb);
4263 return;
4264 }
4265
4266 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
4267 if (ret) {
4268 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
4269 ret);
4270 ath10k_htt_tx_dec_pending(htt);
4271 spin_unlock_bh(&ar->htt.tx_lock);
4272 ieee80211_free_txskb(ar->hw, skb);
4273 return;
4274 }
4275 spin_unlock_bh(&ar->htt.tx_lock);
4276 }
4277
4278 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
4279 if (ret) {
4280 ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
4281 if (is_htt) {
4282 spin_lock_bh(&ar->htt.tx_lock);
4283 ath10k_htt_tx_dec_pending(htt);
4284 if (is_mgmt)
4285 ath10k_htt_tx_mgmt_dec_pending(htt);
4286 spin_unlock_bh(&ar->htt.tx_lock);
4287 }
4288 return;
4289 }
4290 }
4291
4292 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4293 struct ieee80211_txq *txq)
4294 {
4295 struct ath10k *ar = hw->priv;
4296 struct ath10k_txq *artxq = (void *)txq->drv_priv;
4297 struct ieee80211_txq *f_txq;
4298 struct ath10k_txq *f_artxq;
4299 int ret = 0;
4300 int max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
4301
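/* Note: the woken queue is appended to ar->txqs, but frames are pushed
 * from whichever queue currently heads the list; that queue is then
 * re-queued at the tail unless it drained (-ENOENT), so a single busy
 * txq cannot monopolize the scheduler.
 */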
4302 spin_lock_bh(&ar->txqs_lock);
4303 if (list_empty(&artxq->list))
4304 list_add_tail(&artxq->list, &ar->txqs);
4305
4306 f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
4307 f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
4308 list_del_init(&f_artxq->list);
4309
4310 while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
4311 ret = ath10k_mac_tx_push_txq(hw, f_txq);
4312 if (ret < 0)
4313 break;
4314 }
4315 if (ret != -ENOENT)
4316 list_add_tail(&f_artxq->list, &ar->txqs);
4317 spin_unlock_bh(&ar->txqs_lock);
4318
4319 ath10k_htt_tx_txq_update(hw, f_txq);
4320 ath10k_htt_tx_txq_update(hw, txq);
4321 }
4322
4323 /* Must not be called with conf_mutex held as workers can use that also. */
4324 void ath10k_drain_tx(struct ath10k *ar)
4325 {
4326 /* make sure rcu-protected mac80211 tx path itself is drained */
4327 synchronize_net();
4328
4329 ath10k_offchan_tx_purge(ar);
4330 ath10k_mgmt_over_wmi_tx_purge(ar);
4331
4332 cancel_work_sync(&ar->offchan_tx_work);
4333 cancel_work_sync(&ar->wmi_mgmt_tx_work);
4334 }
4335
4336 void ath10k_halt(struct ath10k *ar)
4337 {
4338 struct ath10k_vif *arvif;
4339
4340 lockdep_assert_held(&ar->conf_mutex);
4341
4342 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
4343 ar->filter_flags = 0;
4344 ar->monitor = false;
4345 ar->monitor_arvif = NULL;
4346
4347 if (ar->monitor_started)
4348 ath10k_monitor_stop(ar);
4349
4350 ar->monitor_started = false;
4351 ar->tx_paused = 0;
4352
4353 ath10k_scan_finish(ar);
4354 ath10k_peer_cleanup_all(ar);
4355 ath10k_stop_radar_confirmation(ar);
4356 ath10k_core_stop(ar);
4357 ath10k_hif_power_down(ar);
4358
4359 spin_lock_bh(&ar->data_lock);
4360 list_for_each_entry(arvif, &ar->arvifs, list)
4361 ath10k_mac_vif_beacon_cleanup(arvif);
4362 spin_unlock_bh(&ar->data_lock);
4363 }
4364
4365 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
4366 {
4367 struct ath10k *ar = hw->priv;
4368
4369 mutex_lock(&ar->conf_mutex);
4370
4371 *tx_ant = ar->cfg_tx_chainmask;
4372 *rx_ant = ar->cfg_rx_chainmask;
4373
4374 mutex_unlock(&ar->conf_mutex);
4375
4376 return 0;
4377 }
4378
4379 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
4380 {
4381 /* It is not clear that allowing gaps in the chainmask (e.g. 0x5, which
4382 * enables chains 0 and 2 but skips chain 1) is helpful. It probably
4383 * will not do what the user is hoping for, so warn in that case.
4384 */
4385 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
4386 return;
4387
4388 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
4389 dbg, cm);
4390 }
4391
4392 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
4393 {
4394 int nsts = ar->vht_cap_info;
4395
4396 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4397 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4398
4399 /* If the firmware does not report the number of beamformee
4400 * space-time streams it supports, assume it supports up to 4 and
4401 * return the VHT CAP encoding of that (nsts - 1, i.e. 3).
4402 */
4403 if (nsts == 0)
4404 return 3;
4405
4406 return nsts;
4407 }
4408
4409 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
4410 {
4411 int sound_dim = ar->vht_cap_info;
4412
4413 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4414 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4415
4416 /* If the sounding dimension is not advertised by the firmware,
4417 * let's use a default value of 1
4418 */
4419 if (sound_dim == 0)
4420 return 1;
4421
4422 return sound_dim;
4423 }
4424
4425 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
4426 {
4427 struct ieee80211_sta_vht_cap vht_cap = {0};
4428 struct ath10k_hw_params *hw = &ar->hw_params;
4429 u16 mcs_map;
4430 u32 val;
4431 int i;
4432
4433 vht_cap.vht_supported = 1;
4434 vht_cap.cap = ar->vht_cap_info;
4435
4436 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4437 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
4438 val = ath10k_mac_get_vht_cap_bf_sts(ar);
4439 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4440 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4441
4442 vht_cap.cap |= val;
4443 }
4444
4445 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4446 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
4447 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4448 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4449 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4450
4451 vht_cap.cap |= val;
4452 }
4453
4454 /* Currently the firmware seems to be buggy, don't enable 80+80
4455 * mode until that's resolved.
4456 */
4457 if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) &&
4458 (ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0)
4459 vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
4460
4461 mcs_map = 0;
4462 for (i = 0; i < 8; i++) {
4463 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
4464 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4465 else
4466 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4467 }
4468
4469 if (ar->cfg_tx_chainmask <= 1)
4470 vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
4471
4472 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
4473 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
4474
4475 /* If we are supporting 160Mhz or 80+80, then the NIC may be able to do
4476 * a restricted NSS for 160 or 80+80 vs what it can do for 80Mhz. Give
4477 * user-space a clue if that is the case.
4478 */
4479 if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) &&
4480 (hw->vht160_mcs_rx_highest != 0 ||
4481 hw->vht160_mcs_tx_highest != 0)) {
4482 vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest);
4483 vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest);
4484 }
4485
4486 return vht_cap;
4487 }
4488
4489 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
4490 {
4491 int i;
4492 struct ieee80211_sta_ht_cap ht_cap = {0};
4493
4494 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
4495 return ht_cap;
4496
4497 ht_cap.ht_supported = 1;
4498 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4499 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
4500 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
4501 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
4502 ht_cap.cap |=
4503 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
4504
4505 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
4506 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
4507
4508 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
4509 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
4510
4511 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
4512 u32 smps;
4513
4514 smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
4515 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
4516
4517 ht_cap.cap |= smps;
4518 }
4519
4520 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1))
4521 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
4522
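/* The WMI RX STBC field is a small bit-field carrying the number of
 * supported STBC streams; the shifts below re-home it into the
 * IEEE80211_HT_CAP_RX_STBC position of the HT capability word.
 */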
4523 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
4524 u32 stbc;
4525
4526 stbc = ar->ht_cap_info;
4527 stbc &= WMI_HT_CAP_RX_STBC;
4528 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
4529 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
4530 stbc &= IEEE80211_HT_CAP_RX_STBC;
4531
4532 ht_cap.cap |= stbc;
4533 }
4534
4535 if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
4536 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
4537
4538 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
4539 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
4540
4541 /* max AMSDU is implicitly taken from vht_cap_info */
4542 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
4543 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
4544
4545 for (i = 0; i < ar->num_rf_chains; i++) {
4546 if (ar->cfg_rx_chainmask & BIT(i))
4547 ht_cap.mcs.rx_mask[i] = 0xFF;
4548 }
4549
4550 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
4551
4552 return ht_cap;
4553 }
4554
4555 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
4556 {
4557 struct ieee80211_supported_band *band;
4558 struct ieee80211_sta_vht_cap vht_cap;
4559 struct ieee80211_sta_ht_cap ht_cap;
4560
4561 ht_cap = ath10k_get_ht_cap(ar);
4562 vht_cap = ath10k_create_vht_cap(ar);
4563
4564 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
4565 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
4566 band->ht_cap = ht_cap;
4567 }
4568 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
4569 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
4570 band->ht_cap = ht_cap;
4571 band->vht_cap = vht_cap;
4572 }
4573 }
4574
4575 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
4576 {
4577 int ret;
4578
4579 lockdep_assert_held(&ar->conf_mutex);
4580
4581 ath10k_check_chain_mask(ar, tx_ant, "tx");
4582 ath10k_check_chain_mask(ar, rx_ant, "rx");
4583
4584 ar->cfg_tx_chainmask = tx_ant;
4585 ar->cfg_rx_chainmask = rx_ant;
4586
4587 if ((ar->state != ATH10K_STATE_ON) &&
4588 (ar->state != ATH10K_STATE_RESTARTED))
4589 return 0;
4590
4591 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
4592 tx_ant);
4593 if (ret) {
4594 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
4595 ret, tx_ant);
4596 return ret;
4597 }
4598
4599 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
4600 rx_ant);
4601 if (ret) {
4602 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
4603 ret, rx_ant);
4604 return ret;
4605 }
4606
4607 /* Reload HT/VHT capability */
4608 ath10k_mac_setup_ht_vht_cap(ar);
4609
4610 return 0;
4611 }
4612
4613 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
4614 {
4615 struct ath10k *ar = hw->priv;
4616 int ret;
4617
4618 mutex_lock(&ar->conf_mutex);
4619 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
4620 mutex_unlock(&ar->conf_mutex);
4621 return ret;
4622 }
4623
4624 static int ath10k_start(struct ieee80211_hw *hw)
4625 {
4626 struct ath10k *ar = hw->priv;
4627 u32 param;
4628 int ret = 0;
4629
4630 /*
4631 * This makes sense only when restarting hw. It is harmless to call
4632 * unconditionally. This is necessary to make sure no HTT/WMI tx
4633 * commands will be submitted while restarting.
4634 */
4635 ath10k_drain_tx(ar);
4636
4637 mutex_lock(&ar->conf_mutex);
4638
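/* State machine summary: a cold start moves OFF -> ON and a post-crash
 * recovery moves RESTARTING -> RESTARTED; any other state here means
 * the device is already up, wedged or in testmode (UTF), so the start
 * request is refused.
 */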
4639 switch (ar->state) {
4640 case ATH10K_STATE_OFF:
4641 ar->state = ATH10K_STATE_ON;
4642 break;
4643 case ATH10K_STATE_RESTARTING:
4644 ar->state = ATH10K_STATE_RESTARTED;
4645 break;
4646 case ATH10K_STATE_ON:
4647 case ATH10K_STATE_RESTARTED:
4648 case ATH10K_STATE_WEDGED:
4649 WARN_ON(1);
4650 ret = -EINVAL;
4651 goto err;
4652 case ATH10K_STATE_UTF:
4653 ret = -EBUSY;
4654 goto err;
4655 }
4656
4657 ret = ath10k_hif_power_up(ar);
4658 if (ret) {
4659 ath10k_err(ar, "Could not init hif: %d\n", ret);
4660 goto err_off;
4661 }
4662
4663 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
4664 &ar->normal_mode_fw);
4665 if (ret) {
4666 ath10k_err(ar, "Could not init core: %d\n", ret);
4667 goto err_power_down;
4668 }
4669
4670 param = ar->wmi.pdev_param->pmf_qos;
4671 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4672 if (ret) {
4673 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
4674 goto err_core_stop;
4675 }
4676
4677 param = ar->wmi.pdev_param->dynamic_bw;
4678 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4679 if (ret) {
4680 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
4681 goto err_core_stop;
4682 }
4683
4684 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
4685 ret = ath10k_wmi_adaptive_qcs(ar, true);
4686 if (ret) {
4687 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
4688 ret);
4689 goto err_core_stop;
4690 }
4691 }
4692
4693 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
4694 param = ar->wmi.pdev_param->burst_enable;
4695 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4696 if (ret) {
4697 ath10k_warn(ar, "failed to disable burst: %d\n", ret);
4698 goto err_core_stop;
4699 }
4700 }
4701
4702 param = ar->wmi.pdev_param->idle_ps_config;
4703 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4704 if (ret && ret != -EOPNOTSUPP) {
4705 ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret);
4706 goto err_core_stop;
4707 }
4708
4709 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
4710
4711 /*
4712 * By default the FW sets the AC of ARP frames to voice (6). In that case
4713 * the ARP exchange does not work properly for a UAPSD-enabled AP: ARP
4714 * requests arriving with access category 0 are processed by the network
4715 * stack and sent back with access category 0, but the FW changes the
4716 * access category to 6. Setting the ARP frames' access category to best
4717 * effort (0) solves this problem.
4718 */
4719
4720 param = ar->wmi.pdev_param->arp_ac_override;
4721 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4722 if (ret) {
4723 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
4724 ret);
4725 goto err_core_stop;
4726 }
4727
4728 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
4729 ar->running_fw->fw_file.fw_features)) {
4730 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
4731 WMI_CCA_DETECT_LEVEL_AUTO,
4732 WMI_CCA_DETECT_MARGIN_AUTO);
4733 if (ret) {
4734 ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
4735 ret);
4736 goto err_core_stop;
4737 }
4738 }
4739
4740 param = ar->wmi.pdev_param->ani_enable;
4741 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4742 if (ret) {
4743 ath10k_warn(ar, "failed to enable ani by default: %d\n",
4744 ret);
4745 goto err_core_stop;
4746 }
4747
4748 ar->ani_enabled = true;
4749
4750 if (ath10k_peer_stats_enabled(ar)) {
4751 param = ar->wmi.pdev_param->peer_stats_update_period;
4752 ret = ath10k_wmi_pdev_set_param(ar, param,
4753 PEER_DEFAULT_STATS_UPDATE_PERIOD);
4754 if (ret) {
4755 ath10k_warn(ar,
4756 "failed to set peer stats period : %d\n",
4757 ret);
4758 goto err_core_stop;
4759 }
4760 }
4761
4762 param = ar->wmi.pdev_param->enable_btcoex;
4763 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
4764 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
4765 ar->running_fw->fw_file.fw_features)) {
4766 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4767 if (ret) {
4768 ath10k_warn(ar,
4769 "failed to set btcoex param: %d\n", ret);
4770 goto err_core_stop;
4771 }
4772 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
4773 }
4774
4775 ar->num_started_vdevs = 0;
4776 ath10k_regd_update(ar);
4777
4778 ath10k_spectral_start(ar);
4779 ath10k_thermal_set_throttling(ar);
4780
4781 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
4782
4783 mutex_unlock(&ar->conf_mutex);
4784 return 0;
4785
4786 err_core_stop:
4787 ath10k_core_stop(ar);
4788
4789 err_power_down:
4790 ath10k_hif_power_down(ar);
4791
4792 err_off:
4793 ar->state = ATH10K_STATE_OFF;
4794
4795 err:
4796 mutex_unlock(&ar->conf_mutex);
4797 return ret;
4798 }
4799
4800 static void ath10k_stop(struct ieee80211_hw *hw)
4801 {
4802 struct ath10k *ar = hw->priv;
4803
4804 ath10k_drain_tx(ar);
4805
4806 mutex_lock(&ar->conf_mutex);
4807 if (ar->state != ATH10K_STATE_OFF) {
4808 ath10k_halt(ar);
4809 ar->state = ATH10K_STATE_OFF;
4810 }
4811 mutex_unlock(&ar->conf_mutex);
4812
4813 cancel_work_sync(&ar->set_coverage_class_work);
4814 cancel_delayed_work_sync(&ar->scan.timeout);
4815 cancel_work_sync(&ar->restart_work);
4816 }
4817
4818 static int ath10k_config_ps(struct ath10k *ar)
4819 {
4820 struct ath10k_vif *arvif;
4821 int ret = 0;
4822
4823 lockdep_assert_held(&ar->conf_mutex);
4824
4825 list_for_each_entry(arvif, &ar->arvifs, list) {
4826 ret = ath10k_mac_vif_setup_ps(arvif);
4827 if (ret) {
4828 ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
4829 break;
4830 }
4831 }
4832
4833 return ret;
4834 }
4835
4836 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
4837 {
4838 int ret;
4839 u32 param;
4840
4841 lockdep_assert_held(&ar->conf_mutex);
4842
4843 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
4844
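/* The WMI txpower limit parameters are presumably expressed in
 * half-dBm steps (an assumption, not stated here), hence the
 * multiplication of the requested dBm value by 2 below.
 */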
4845 param = ar->wmi.pdev_param->txpower_limit2g;
4846 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4847 if (ret) {
4848 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
4849 txpower, ret);
4850 return ret;
4851 }
4852
4853 param = ar->wmi.pdev_param->txpower_limit5g;
4854 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4855 if (ret) {
4856 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
4857 txpower, ret);
4858 return ret;
4859 }
4860
4861 return 0;
4862 }
4863
4864 static int ath10k_mac_txpower_recalc(struct ath10k *ar)
4865 {
4866 struct ath10k_vif *arvif;
4867 int ret, txpower = -1;
4868
4869 lockdep_assert_held(&ar->conf_mutex);
4870
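/* The firmware txpower limit is global per device, so the lowest
 * positive per-vif txpower across all active interfaces is what
 * actually gets programmed.
 */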
4871 list_for_each_entry(arvif, &ar->arvifs, list) {
4872 if (arvif->txpower <= 0)
4873 continue;
4874
4875 if (txpower == -1)
4876 txpower = arvif->txpower;
4877 else
4878 txpower = min(txpower, arvif->txpower);
4879 }
4880
4881 if (txpower == -1)
4882 return 0;
4883
4884 ret = ath10k_mac_txpower_setup(ar, txpower);
4885 if (ret) {
4886 ath10k_warn(ar, "failed to setup tx power %d: %d\n",
4887 txpower, ret);
4888 return ret;
4889 }
4890
4891 return 0;
4892 }
4893
4894 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
4895 {
4896 struct ath10k *ar = hw->priv;
4897 struct ieee80211_conf *conf = &hw->conf;
4898 int ret = 0;
4899
4900 mutex_lock(&ar->conf_mutex);
4901
4902 if (changed & IEEE80211_CONF_CHANGE_PS)
4903 ath10k_config_ps(ar);
4904
4905 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
4906 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
4907 ret = ath10k_monitor_recalc(ar);
4908 if (ret)
4909 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
4910 }
4911
4912 mutex_unlock(&ar->conf_mutex);
4913 return ret;
4914 }
4915
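/* Worked example: a chainmask of 0xf maps to 4 spatial streams, 0x7 to
 * 3, 0x3 to 2, and anything else - including gappy masks such as 0x5 -
 * falls back to 1.
 */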
4916 static u32 get_nss_from_chainmask(u16 chain_mask)
4917 {
4918 if ((chain_mask & 0xf) == 0xf)
4919 return 4;
4920 else if ((chain_mask & 0x7) == 0x7)
4921 return 3;
4922 else if ((chain_mask & 0x3) == 0x3)
4923 return 2;
4924 return 1;
4925 }
4926
4927 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
4928 {
4929 u32 value = 0;
4930 struct ath10k *ar = arvif->ar;
4931 int nsts;
4932 int sound_dim;
4933
4934 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
4935 return 0;
4936
4937 nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
4938 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4939 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
4940 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
4941
4942 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4943 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4944 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
4945 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
4946
4947 if (!value)
4948 return 0;
4949
4950 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
4951 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
4952
4953 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
4954 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
4955 WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
4956
4957 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
4958 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
4959
4960 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
4961 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
4962 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
4963
4964 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
4965 ar->wmi.vdev_param->txbf, value);
4966 }
4967
4968 /*
4969 * TODO:
4970 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
4971 * because we will send mgmt frames without CCK. This requirement
4972 * for P2P_FIND/GO_NEG should be handled by checking CCK flag
4973 * in the TX packet.
4974 */
4975 static int ath10k_add_interface(struct ieee80211_hw *hw,
4976 struct ieee80211_vif *vif)
4977 {
4978 struct ath10k *ar = hw->priv;
4979 struct ath10k_vif *arvif = (void *)vif->drv_priv;
4980 struct ath10k_peer *peer;
4981 enum wmi_sta_powersave_param param;
4982 int ret = 0;
4983 u32 value;
4984 int bit;
4985 int i;
4986 u32 vdev_param;
4987
4988 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
4989
4990 mutex_lock(&ar->conf_mutex);
4991
4992 memset(arvif, 0, sizeof(*arvif));
4993 ath10k_mac_txq_init(vif->txq);
4994
4995 arvif->ar = ar;
4996 arvif->vif = vif;
4997
4998 INIT_LIST_HEAD(&arvif->list);
4999 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
5000 INIT_DELAYED_WORK(&arvif->connection_loss_work,
5001 ath10k_mac_vif_sta_connection_loss_work);
5002
5003 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
5004 arvif->bitrate_mask.control[i].legacy = 0xffffffff;
5005 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
5006 sizeof(arvif->bitrate_mask.control[i].ht_mcs));
5007 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
5008 sizeof(arvif->bitrate_mask.control[i].vht_mcs));
5009 }
5010
5011 if (ar->num_peers >= ar->max_num_peers) {
5012 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
5013 ret = -ENOBUFS;
5014 goto err;
5015 }
5016
5017 if (ar->free_vdev_map == 0) {
5018 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
5019 ret = -EBUSY;
5020 goto err;
5021 }
5022 bit = __ffs64(ar->free_vdev_map);
5023
5024 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
5025 bit, ar->free_vdev_map);
5026
5027 arvif->vdev_id = bit;
5028 arvif->vdev_subtype =
5029 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
5030
5031 switch (vif->type) {
5032 case NL80211_IFTYPE_P2P_DEVICE:
5033 arvif->vdev_type = WMI_VDEV_TYPE_STA;
5034 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5035 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
5036 break;
5037 case NL80211_IFTYPE_UNSPECIFIED:
5038 case NL80211_IFTYPE_STATION:
5039 arvif->vdev_type = WMI_VDEV_TYPE_STA;
5040 if (vif->p2p)
5041 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5042 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
5043 break;
5044 case NL80211_IFTYPE_ADHOC:
5045 arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
5046 break;
5047 case NL80211_IFTYPE_MESH_POINT:
5048 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
5049 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5050 (ar, WMI_VDEV_SUBTYPE_MESH_11S);
5051 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
5052 ret = -EINVAL;
5053 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
5054 goto err;
5055 }
5056 arvif->vdev_type = WMI_VDEV_TYPE_AP;
5057 break;
5058 case NL80211_IFTYPE_AP:
5059 arvif->vdev_type = WMI_VDEV_TYPE_AP;
5060
5061 if (vif->p2p)
5062 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5063 (ar, WMI_VDEV_SUBTYPE_P2P_GO);
5064 break;
5065 case NL80211_IFTYPE_MONITOR:
5066 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
5067 break;
5068 default:
5069 WARN_ON(1);
5070 break;
5071 }
5072
5073 /* Using vdev_id as queue number will make it very easy to do per-vif
5074 * tx queue locking. This shouldn't wrap due to interface combinations
5075 * but do a modulo for correctness sake and prevent using offchannel tx
5076 * queues for regular vif tx.
5077 */
5078 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
5079 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
5080 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
5081
5082 /* Some firmware revisions don't wait for beacon tx completion before
5083 * sending another SWBA event. This could lead to hardware using old
5084 * (freed) beacon data in some cases, e.g. tx credit starvation
5085 * combined with missed TBTT. This is very very rare.
5086 *
5087 * On non-IOMMU-enabled hosts this could be a possible security issue
5088 * because hw could beacon some random data on the air. On
5089 * IOMMU-enabled hosts DMAR faults would occur in most cases and target
5090 * device would crash.
5091 *
5092 * Since there are no beacon tx completions (neither implicit nor explicit)
5093 * propagated to host the only workaround for this is to allocate a
5094 * DMA-coherent buffer for a lifetime of a vif and use it for all
5095 * beacon tx commands. Worst case for this approach is some beacons may
5096 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
5097 */
5098 if (vif->type == NL80211_IFTYPE_ADHOC ||
5099 vif->type == NL80211_IFTYPE_MESH_POINT ||
5100 vif->type == NL80211_IFTYPE_AP) {
5101 arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
5102 IEEE80211_MAX_FRAME_LEN,
5103 &arvif->beacon_paddr,
5104 GFP_ATOMIC);
5105 if (!arvif->beacon_buf) {
5106 ret = -ENOMEM;
5107 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
5108 ret);
5109 goto err;
5110 }
5111 }
5112 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
5113 arvif->nohwcrypt = true;
5114
5115 if (arvif->nohwcrypt &&
5116 !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
5117 ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
5118 goto err;
5119 }
5120
5121 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
5122 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
5123 arvif->beacon_buf ? "single-buf" : "per-skb");
5124
5125 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
5126 arvif->vdev_subtype, vif->addr);
5127 if (ret) {
5128 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
5129 arvif->vdev_id, ret);
5130 goto err;
5131 }
5132
5133 ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
5134 spin_lock_bh(&ar->data_lock);
5135 list_add(&arvif->list, &ar->arvifs);
5136 spin_unlock_bh(&ar->data_lock);
5137
5138 /* It makes no sense to have firmware do keepalives. mac80211 already
5139 * takes care of this with idle connection polling.
5140 */
5141 ret = ath10k_mac_vif_disable_keepalive(arvif);
5142 if (ret) {
5143 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
5144 arvif->vdev_id, ret);
5145 goto err_vdev_delete;
5146 }
5147
5148 arvif->def_wep_key_idx = -1;
5149
5150 vdev_param = ar->wmi.vdev_param->tx_encap_type;
5151 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5152 ATH10K_HW_TXRX_NATIVE_WIFI);
5153 /* 10.X firmware does not support this VDEV parameter. Do not warn */
5154 if (ret && ret != -EOPNOTSUPP) {
5155 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
5156 arvif->vdev_id, ret);
5157 goto err_vdev_delete;
5158 }
5159
5160 /* Configuring the number of spatial streams for a monitor interface
5161 * causes a target assert in qca9888 and qca6174.
5162 */
5163 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
5164 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
5165
5166 vdev_param = ar->wmi.vdev_param->nss;
5167 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5168 nss);
5169 if (ret) {
5170 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
5171 arvif->vdev_id, ar->cfg_tx_chainmask, nss,
5172 ret);
5173 goto err_vdev_delete;
5174 }
5175 }
5176
5177 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5178 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5179 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
5180 vif->addr, WMI_PEER_TYPE_DEFAULT);
5181 if (ret) {
5182 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
5183 arvif->vdev_id, ret);
5184 goto err_vdev_delete;
5185 }
5186
5187 spin_lock_bh(&ar->data_lock);
5188
5189 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
5190 if (!peer) {
5191 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
5192 vif->addr, arvif->vdev_id);
5193 spin_unlock_bh(&ar->data_lock);
5194 ret = -ENOENT;
5195 goto err_peer_delete;
5196 }
5197
5198 arvif->peer_id = find_first_bit(peer->peer_ids,
5199 ATH10K_MAX_NUM_PEER_IDS);
5200
5201 spin_unlock_bh(&ar->data_lock);
5202 } else {
5203 arvif->peer_id = HTT_INVALID_PEERID;
5204 }
5205
5206 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
5207 ret = ath10k_mac_set_kickout(arvif);
5208 if (ret) {
5209 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
5210 arvif->vdev_id, ret);
5211 goto err_peer_delete;
5212 }
5213 }
5214
5215 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
5216 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
5217 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
5218 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
5219 param, value);
5220 if (ret) {
5221 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
5222 arvif->vdev_id, ret);
5223 goto err_peer_delete;
5224 }
5225
5226 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
5227 if (ret) {
5228 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
5229 arvif->vdev_id, ret);
5230 goto err_peer_delete;
5231 }
5232
5233 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
5234 if (ret) {
5235 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
5236 arvif->vdev_id, ret);
5237 goto err_peer_delete;
5238 }
5239 }
5240
5241 ret = ath10k_mac_set_txbf_conf(arvif);
5242 if (ret) {
5243 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
5244 arvif->vdev_id, ret);
5245 goto err_peer_delete;
5246 }
5247
5248 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
5249 if (ret) {
5250 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
5251 arvif->vdev_id, ret);
5252 goto err_peer_delete;
5253 }
5254
5255 arvif->txpower = vif->bss_conf.txpower;
5256 ret = ath10k_mac_txpower_recalc(ar);
5257 if (ret) {
5258 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5259 goto err_peer_delete;
5260 }
5261
5262 if (vif->type == NL80211_IFTYPE_MONITOR) {
5263 ar->monitor_arvif = arvif;
5264 ret = ath10k_monitor_recalc(ar);
5265 if (ret) {
5266 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5267 goto err_peer_delete;
5268 }
5269 }
5270
5271 spin_lock_bh(&ar->htt.tx_lock);
5272 if (!ar->tx_paused)
5273 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
5274 spin_unlock_bh(&ar->htt.tx_lock);
5275
5276 mutex_unlock(&ar->conf_mutex);
5277 return 0;
5278
5279 err_peer_delete:
5280 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5281 arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
5282 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
5283
5284 err_vdev_delete:
5285 ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5286 ar->free_vdev_map |= 1LL << arvif->vdev_id;
5287 spin_lock_bh(&ar->data_lock);
5288 list_del(&arvif->list);
5289 spin_unlock_bh(&ar->data_lock);
5290
5291 err:
5292 if (arvif->beacon_buf) {
5293 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
5294 arvif->beacon_buf, arvif->beacon_paddr);
5295 arvif->beacon_buf = NULL;
5296 }
5297
5298 mutex_unlock(&ar->conf_mutex);
5299
5300 return ret;
5301 }
5302
5303 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
5304 {
5305 int i;
5306
5307 for (i = 0; i < BITS_PER_LONG; i++)
5308 ath10k_mac_vif_tx_unlock(arvif, i);
5309 }
5310
5311 static void ath10k_remove_interface(struct ieee80211_hw *hw,
5312 struct ieee80211_vif *vif)
5313 {
5314 struct ath10k *ar = hw->priv;
5315 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5316 struct ath10k_peer *peer;
5317 int ret;
5318 int i;
5319
5320 cancel_work_sync(&arvif->ap_csa_work);
5321 cancel_delayed_work_sync(&arvif->connection_loss_work);
5322
5323 mutex_lock(&ar->conf_mutex);
5324
5325 spin_lock_bh(&ar->data_lock);
5326 ath10k_mac_vif_beacon_cleanup(arvif);
5327 spin_unlock_bh(&ar->data_lock);
5328
5329 ret = ath10k_spectral_vif_stop(arvif);
5330 if (ret)
5331 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
5332 arvif->vdev_id, ret);
5333
5334 ar->free_vdev_map |= 1LL << arvif->vdev_id;
5335 spin_lock_bh(&ar->data_lock);
5336 list_del(&arvif->list);
5337 spin_unlock_bh(&ar->data_lock);
5338
5339 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5340 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5341 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
5342 vif->addr);
5343 if (ret)
5344 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
5345 arvif->vdev_id, ret);
5346
5347 kfree(arvif->u.ap.noa_data);
5348 }
5349
5350 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
5351 arvif->vdev_id);
5352
5353 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5354 if (ret)
5355 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
5356 arvif->vdev_id, ret);
5357
5358 /* Some firmware revisions don't notify host about self-peer removal
5359 * until after associated vdev is deleted.
5360 */
5361 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5362 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5363 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
5364 vif->addr);
5365 if (ret)
5366 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
5367 arvif->vdev_id, ret);
5368
5369 spin_lock_bh(&ar->data_lock);
5370 ar->num_peers--;
5371 spin_unlock_bh(&ar->data_lock);
5372 }
5373
5374 spin_lock_bh(&ar->data_lock);
5375 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5376 peer = ar->peer_map[i];
5377 if (!peer)
5378 continue;
5379
5380 if (peer->vif == vif) {
5381 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
5382 vif->addr, arvif->vdev_id);
5383 peer->vif = NULL;
5384 }
5385 }
5386 spin_unlock_bh(&ar->data_lock);
5387
5388 ath10k_peer_cleanup(ar, arvif->vdev_id);
5389 ath10k_mac_txq_unref(ar, vif->txq);
5390
5391 if (vif->type == NL80211_IFTYPE_MONITOR) {
5392 ar->monitor_arvif = NULL;
5393 ret = ath10k_monitor_recalc(ar);
5394 if (ret)
5395 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5396 }
5397
5398 ret = ath10k_mac_txpower_recalc(ar);
5399 if (ret)
5400 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5401
5402 spin_lock_bh(&ar->htt.tx_lock);
5403 ath10k_mac_vif_tx_unlock_all(arvif);
5404 spin_unlock_bh(&ar->htt.tx_lock);
5405
5406 ath10k_mac_txq_unref(ar, vif->txq);
5407
5408 mutex_unlock(&ar->conf_mutex);
5409 }
5410
5411 /*
5412 * FIXME: Has to be verified.
5413 */
5414 #define SUPPORTED_FILTERS \
5415 (FIF_ALLMULTI | \
5416 FIF_CONTROL | \
5417 FIF_PSPOLL | \
5418 FIF_OTHER_BSS | \
5419 FIF_BCN_PRBRESP_PROMISC | \
5420 FIF_PROBE_REQ | \
5421 FIF_FCSFAIL)
5422
5423 static void ath10k_configure_filter(struct ieee80211_hw *hw,
5424 unsigned int changed_flags,
5425 unsigned int *total_flags,
5426 u64 multicast)
5427 {
5428 struct ath10k *ar = hw->priv;
5429 int ret;
5430
5431 mutex_lock(&ar->conf_mutex);
5432
5433 changed_flags &= SUPPORTED_FILTERS;
5434 *total_flags &= SUPPORTED_FILTERS;
5435 ar->filter_flags = *total_flags;
5436
5437 ret = ath10k_monitor_recalc(ar);
5438 if (ret)
5439 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5440
5441 mutex_unlock(&ar->conf_mutex);
5442 }
5443
5444 static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
5445 struct ieee80211_vif *vif,
5446 struct ieee80211_bss_conf *info,
5447 u32 changed)
5448 {
5449 struct ath10k *ar = hw->priv;
5450 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5451 struct cfg80211_chan_def def;
5452 u32 vdev_param, pdev_param, slottime, preamble;
5453 u16 bitrate, hw_value;
5454 u8 rate;
5455 int rateidx, ret = 0;
5456 enum nl80211_band band;
5457
5458 mutex_lock(&ar->conf_mutex);
5459
5460 if (changed & BSS_CHANGED_IBSS)
5461 ath10k_control_ibss(arvif, info, vif->addr);
5462
5463 if (changed & BSS_CHANGED_BEACON_INT) {
5464 arvif->beacon_interval = info->beacon_int;
5465 vdev_param = ar->wmi.vdev_param->beacon_interval;
5466 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5467 arvif->beacon_interval);
5468 ath10k_dbg(ar, ATH10K_DBG_MAC,
5469 "mac vdev %d beacon_interval %d\n",
5470 arvif->vdev_id, arvif->beacon_interval);
5471
5472 if (ret)
5473 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
5474 arvif->vdev_id, ret);
5475 }
5476
5477 if (changed & BSS_CHANGED_BEACON) {
5478 ath10k_dbg(ar, ATH10K_DBG_MAC,
5479 "vdev %d set beacon tx mode to staggered\n",
5480 arvif->vdev_id);
5481
5482 pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
5483 ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
5484 WMI_BEACON_STAGGERED_MODE);
5485 if (ret)
5486 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
5487 arvif->vdev_id, ret);
5488
5489 ret = ath10k_mac_setup_bcn_tmpl(arvif);
5490 if (ret)
5491 ath10k_warn(ar, "failed to update beacon template: %d\n",
5492 ret);
5493
5494 if (ieee80211_vif_is_mesh(vif)) {
5495 /* mesh doesn't use SSID but firmware needs it */
5496 strncpy(arvif->u.ap.ssid, "mesh",
5497 sizeof(arvif->u.ap.ssid));
5498 arvif->u.ap.ssid_len = 4;
5499 }
5500 }
5501
5502 if (changed & BSS_CHANGED_AP_PROBE_RESP) {
5503 ret = ath10k_mac_setup_prb_tmpl(arvif);
5504 if (ret)
5505 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
5506 arvif->vdev_id, ret);
5507 }
5508
5509 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
5510 arvif->dtim_period = info->dtim_period;
5511
5512 ath10k_dbg(ar, ATH10K_DBG_MAC,
5513 "mac vdev %d dtim_period %d\n",
5514 arvif->vdev_id, arvif->dtim_period);
5515
5516 vdev_param = ar->wmi.vdev_param->dtim_period;
5517 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5518 arvif->dtim_period);
5519 if (ret)
5520 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
5521 arvif->vdev_id, ret);
5522 }
5523
5524 if (changed & BSS_CHANGED_SSID &&
5525 vif->type == NL80211_IFTYPE_AP) {
5526 arvif->u.ap.ssid_len = info->ssid_len;
5527 if (info->ssid_len)
5528 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
5529 arvif->u.ap.hidden_ssid = info->hidden_ssid;
5530 }
5531
5532 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
5533 ether_addr_copy(arvif->bssid, info->bssid);
5534
5535 if (changed & BSS_CHANGED_BEACON_ENABLED)
5536 ath10k_control_beaconing(arvif, info);
5537
5538 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
5539 arvif->use_cts_prot = info->use_cts_prot;
5540
5541 ret = ath10k_recalc_rtscts_prot(arvif);
5542 if (ret)
5543 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
5544 arvif->vdev_id, ret);
5545
5546 if (ath10k_mac_can_set_cts_prot(arvif)) {
5547 ret = ath10k_mac_set_cts_prot(arvif);
5548 if (ret)
5549 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
5550 arvif->vdev_id, ret);
5551 }
5552 }
5553
5554 if (changed & BSS_CHANGED_ERP_SLOT) {
5555 if (info->use_short_slot)
5556 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
5557
5558 else
5559 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
5560
5561 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
5562 arvif->vdev_id, slottime);
5563
5564 vdev_param = ar->wmi.vdev_param->slot_time;
5565 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5566 slottime);
5567 if (ret)
5568 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
5569 arvif->vdev_id, ret);
5570 }
5571
5572 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
5573 if (info->use_short_preamble)
5574 preamble = WMI_VDEV_PREAMBLE_SHORT;
5575 else
5576 preamble = WMI_VDEV_PREAMBLE_LONG;
5577
5578 ath10k_dbg(ar, ATH10K_DBG_MAC,
5579 			   "mac vdev %d preamble %d\n",
5580 arvif->vdev_id, preamble);
5581
5582 vdev_param = ar->wmi.vdev_param->preamble;
5583 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5584 preamble);
5585 if (ret)
5586 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
5587 arvif->vdev_id, ret);
5588 }
5589
5590 if (changed & BSS_CHANGED_ASSOC) {
5591 if (info->assoc) {
5592 /* Workaround: Make sure monitor vdev is not running
5593 * when associating to prevent some firmware revisions
5594 * (e.g. 10.1 and 10.2) from crashing.
5595 */
5596 if (ar->monitor_started)
5597 ath10k_monitor_stop(ar);
5598 ath10k_bss_assoc(hw, vif, info);
5599 ath10k_monitor_recalc(ar);
5600 } else {
5601 ath10k_bss_disassoc(hw, vif);
5602 }
5603 }
5604
5605 if (changed & BSS_CHANGED_TXPOWER) {
5606 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
5607 arvif->vdev_id, info->txpower);
5608
5609 arvif->txpower = info->txpower;
5610 ret = ath10k_mac_txpower_recalc(ar);
5611 if (ret)
5612 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5613 }
5614
5615 if (changed & BSS_CHANGED_PS) {
5616 arvif->ps = vif->bss_conf.ps;
5617
5618 ret = ath10k_config_ps(ar);
5619 if (ret)
5620 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
5621 arvif->vdev_id, ret);
5622 }
5623
5624 if (changed & BSS_CHANGED_MCAST_RATE &&
5625 !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) {
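		/* bss_conf.mcast_rate[] is the band's bitrate table index + 1;
		 * the WMI legacy rate table also carries the CCK entries ahead
		 * of the OFDM ones, hence the extra offset on 11a-capable
		 * hardware.
		 */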
5626 band = def.chan->band;
5627 rateidx = vif->bss_conf.mcast_rate[band] - 1;
5628
5629 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
5630 rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
5631
5632 bitrate = ath10k_wmi_legacy_rates[rateidx].bitrate;
5633 hw_value = ath10k_wmi_legacy_rates[rateidx].hw_value;
5634 if (ath10k_mac_bitrate_is_cck(bitrate))
5635 preamble = WMI_RATE_PREAMBLE_CCK;
5636 else
5637 preamble = WMI_RATE_PREAMBLE_OFDM;
5638
5639 rate = ATH10K_HW_RATECODE(hw_value, 0, preamble);
5640
5641 ath10k_dbg(ar, ATH10K_DBG_MAC,
5642 "mac vdev %d mcast_rate %x\n",
5643 arvif->vdev_id, rate);
5644
5645 vdev_param = ar->wmi.vdev_param->mcast_data_rate;
5646 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
5647 vdev_param, rate);
5648 if (ret)
5649 ath10k_warn(ar,
5650 "failed to set mcast rate on vdev %i: %d\n",
5651 arvif->vdev_id, ret);
5652
5653 vdev_param = ar->wmi.vdev_param->bcast_data_rate;
5654 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
5655 vdev_param, rate);
5656 if (ret)
5657 ath10k_warn(ar,
5658 "failed to set bcast rate on vdev %i: %d\n",
5659 arvif->vdev_id, ret);
5660 }
5661
5662 mutex_unlock(&ar->conf_mutex);
5663 }
5664
5665 static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value)
5666 {
5667 struct ath10k *ar = hw->priv;
5668
5669 /* This function should never be called if setting the coverage class
5670 * is not supported on this hardware.
5671 */
5672 if (!ar->hw_params.hw_ops->set_coverage_class) {
5673 WARN_ON_ONCE(1);
5674 return;
5675 }
5676 ar->hw_params.hw_ops->set_coverage_class(ar, value);
5677 }
5678
5679 struct ath10k_mac_tdls_iter_data {
5680 u32 num_tdls_stations;
5681 struct ieee80211_vif *curr_vif;
5682 };
5683
5684 static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
5685 struct ieee80211_sta *sta)
5686 {
5687 struct ath10k_mac_tdls_iter_data *iter_data = data;
5688 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5689 struct ieee80211_vif *sta_vif = arsta->arvif->vif;
5690
5691 if (sta->tdls && sta_vif == iter_data->curr_vif)
5692 iter_data->num_tdls_stations++;
5693 }
5694
5695 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
5696 struct ieee80211_vif *vif)
5697 {
5698 struct ath10k_mac_tdls_iter_data data = {};
5699
5700 data.curr_vif = vif;
5701
5702 ieee80211_iterate_stations_atomic(hw,
5703 ath10k_mac_tdls_vif_stations_count_iter,
5704 &data);
5705 return data.num_tdls_stations;
5706 }
5707
5708 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
5709 struct ieee80211_vif *vif)
5710 {
5711 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5712 int *num_tdls_vifs = data;
5713
5714 if (vif->type != NL80211_IFTYPE_STATION)
5715 return;
5716
5717 if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
5718 (*num_tdls_vifs)++;
5719 }
5720
5721 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
5722 {
5723 int num_tdls_vifs = 0;
5724
5725 ieee80211_iterate_active_interfaces_atomic(hw,
5726 IEEE80211_IFACE_ITER_NORMAL,
5727 ath10k_mac_tdls_vifs_count_iter,
5728 &num_tdls_vifs);
5729 return num_tdls_vifs;
5730 }
5731
5732 static int ath10k_hw_scan(struct ieee80211_hw *hw,
5733 struct ieee80211_vif *vif,
5734 struct ieee80211_scan_request *hw_req)
5735 {
5736 struct ath10k *ar = hw->priv;
5737 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5738 struct cfg80211_scan_request *req = &hw_req->req;
5739 struct wmi_start_scan_arg arg;
5740 int ret = 0;
5741 int i;
5742 u32 scan_timeout;
5743
5744 mutex_lock(&ar->conf_mutex);
5745
5746 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) {
5747 ret = -EBUSY;
5748 goto exit;
5749 }
5750
5751 spin_lock_bh(&ar->data_lock);
5752 switch (ar->scan.state) {
5753 case ATH10K_SCAN_IDLE:
5754 reinit_completion(&ar->scan.started);
5755 reinit_completion(&ar->scan.completed);
5756 ar->scan.state = ATH10K_SCAN_STARTING;
5757 ar->scan.is_roc = false;
5758 ar->scan.vdev_id = arvif->vdev_id;
5759 ret = 0;
5760 break;
5761 case ATH10K_SCAN_STARTING:
5762 case ATH10K_SCAN_RUNNING:
5763 case ATH10K_SCAN_ABORTING:
5764 ret = -EBUSY;
5765 break;
5766 }
5767 spin_unlock_bh(&ar->data_lock);
5768
5769 if (ret)
5770 goto exit;
5771
5772 memset(&arg, 0, sizeof(arg));
5773 ath10k_wmi_start_scan_init(ar, &arg);
5774 arg.vdev_id = arvif->vdev_id;
5775 arg.scan_id = ATH10K_SCAN_ID;
5776
5777 if (req->ie_len) {
5778 arg.ie_len = req->ie_len;
5779 memcpy(arg.ie, req->ie, arg.ie_len);
5780 }
5781
5782 if (req->n_ssids) {
5783 arg.n_ssids = req->n_ssids;
5784 for (i = 0; i < arg.n_ssids; i++) {
5785 arg.ssids[i].len = req->ssids[i].ssid_len;
5786 arg.ssids[i].ssid = req->ssids[i].ssid;
5787 }
5788 } else {
5789 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
5790 }
5791
5792 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
5793 arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ;
5794 ether_addr_copy(arg.mac_addr.addr, req->mac_addr);
5795 ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask);
5796 }
5797
5798 if (req->n_channels) {
5799 arg.n_channels = req->n_channels;
5800 for (i = 0; i < arg.n_channels; i++)
5801 arg.channels[i] = req->channels[i]->center_freq;
5802 }
5803
5804 /* if duration is set, default dwell times will be overwritten */
5805 if (req->duration) {
5806 arg.dwell_time_active = req->duration;
5807 arg.dwell_time_passive = req->duration;
5808 arg.burst_duration_ms = req->duration;
5809
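		/* Worst case: every channel is dwelt on for req->duration plus
		 * the WMI channel switch event overhead, with up to
		 * max_rest_time spent off-channel between visits, capped at
		 * max_scan_time plus the usual 200ms margin.
		 */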
5810 scan_timeout = min_t(u32, arg.max_rest_time *
5811 (arg.n_channels - 1) + (req->duration +
5812 ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
5813 arg.n_channels, arg.max_scan_time + 200);
5814
5815 } else {
5816 /* Add a 200ms margin to account for event/command processing */
5817 scan_timeout = arg.max_scan_time + 200;
5818 }
5819
5820 ret = ath10k_start_scan(ar, &arg);
5821 if (ret) {
5822 ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
5823 spin_lock_bh(&ar->data_lock);
5824 ar->scan.state = ATH10K_SCAN_IDLE;
5825 spin_unlock_bh(&ar->data_lock);
5826 }
5827
5828 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
5829 msecs_to_jiffies(scan_timeout));
5830
5831 exit:
5832 mutex_unlock(&ar->conf_mutex);
5833 return ret;
5834 }
5835
5836 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
5837 struct ieee80211_vif *vif)
5838 {
5839 struct ath10k *ar = hw->priv;
5840
5841 mutex_lock(&ar->conf_mutex);
5842 ath10k_scan_abort(ar);
5843 mutex_unlock(&ar->conf_mutex);
5844
5845 cancel_delayed_work_sync(&ar->scan.timeout);
5846 }
5847
5848 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
5849 struct ath10k_vif *arvif,
5850 enum set_key_cmd cmd,
5851 struct ieee80211_key_conf *key)
5852 {
5853 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
5854 int ret;
5855
5856 /* 10.1 firmware branch requires default key index to be set to group
5857 * key index after installing it. Otherwise FW/HW Txes corrupted
5858 * frames with multi-vif APs. This is not required for main firmware
5859 * branch (e.g. 636).
5860 *
5861 * This is also needed for 636 fw for IBSS-RSN to work more reliably.
5862 *
5863 * FIXME: It remains unknown if this is required for multi-vif STA
5864 * interfaces on 10.1.
5865 */
5866
5867 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
5868 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
5869 return;
5870
5871 if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
5872 return;
5873
5874 if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
5875 return;
5876
5877 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5878 return;
5879
5880 if (cmd != SET_KEY)
5881 return;
5882
5883 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5884 key->keyidx);
5885 if (ret)
5886 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
5887 arvif->vdev_id, ret);
5888 }
5889
5890 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5891 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
5892 struct ieee80211_key_conf *key)
5893 {
5894 struct ath10k *ar = hw->priv;
5895 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5896 struct ath10k_peer *peer;
5897 const u8 *peer_addr;
5898 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
5899 key->cipher == WLAN_CIPHER_SUITE_WEP104;
5900 int ret = 0;
5901 int ret2;
5902 u32 flags = 0;
5903 u32 flags2;
5904
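	/* Ciphers that cannot be offloaded are rejected with a positive
	 * return value (1), which makes mac80211 fall back to software
	 * crypto for the key instead of treating it as a hard error.
	 */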
5905 /* this one needs to be done in software */
5906 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
5907 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
5908 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
5909 key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
5910 return 1;
5911
5912 if (arvif->nohwcrypt)
5913 return 1;
5914
5915 if (key->keyidx > WMI_MAX_KEY_INDEX)
5916 return -ENOSPC;
5917
5918 mutex_lock(&ar->conf_mutex);
5919
5920 if (sta)
5921 peer_addr = sta->addr;
5922 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
5923 peer_addr = vif->bss_conf.bssid;
5924 else
5925 peer_addr = vif->addr;
5926
5927 key->hw_key_idx = key->keyidx;
5928
5929 if (is_wep) {
5930 if (cmd == SET_KEY)
5931 arvif->wep_keys[key->keyidx] = key;
5932 else
5933 arvif->wep_keys[key->keyidx] = NULL;
5934 }
5935
5936 	/* The peer should not disappear midway (unless FW goes awry) since
5937 	 * we already hold conf_mutex. We just make sure it's there now.
5938 */
5939 spin_lock_bh(&ar->data_lock);
5940 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5941 spin_unlock_bh(&ar->data_lock);
5942
5943 if (!peer) {
5944 if (cmd == SET_KEY) {
5945 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
5946 peer_addr);
5947 ret = -EOPNOTSUPP;
5948 goto exit;
5949 } else {
5950 /* if the peer doesn't exist there is no key to disable anymore */
5951 goto exit;
5952 }
5953 }
5954
5955 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5956 flags |= WMI_KEY_PAIRWISE;
5957 else
5958 flags |= WMI_KEY_GROUP;
5959
5960 if (is_wep) {
5961 if (cmd == DISABLE_KEY)
5962 ath10k_clear_vdev_key(arvif, key);
5963
5964 /* When WEP keys are uploaded it's possible that there are
5965 * stations associated already (e.g. when merging) without any
5966 * keys. Static WEP needs an explicit per-peer key upload.
5967 */
5968 if (vif->type == NL80211_IFTYPE_ADHOC &&
5969 cmd == SET_KEY)
5970 ath10k_mac_vif_update_wep_key(arvif, key);
5971
5972 /* 802.1x never sets the def_wep_key_idx so each set_key()
5973 * call changes default tx key.
5974 *
5975 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
5976 * after first set_key().
5977 */
5978 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
5979 flags |= WMI_KEY_TX_USAGE;
5980 }
5981
5982 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
5983 if (ret) {
5984 WARN_ON(ret > 0);
5985 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
5986 arvif->vdev_id, peer_addr, ret);
5987 goto exit;
5988 }
5989
5990 /* mac80211 sets static WEP keys as groupwise while firmware requires
5991 * them to be installed twice as both pairwise and groupwise.
5992 */
5993 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
5994 flags2 = flags;
5995 flags2 &= ~WMI_KEY_GROUP;
5996 flags2 |= WMI_KEY_PAIRWISE;
5997
5998 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
5999 if (ret) {
6000 WARN_ON(ret > 0);
6001 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
6002 arvif->vdev_id, peer_addr, ret);
6003 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
6004 peer_addr, flags);
6005 if (ret2) {
6006 WARN_ON(ret2 > 0);
6007 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
6008 arvif->vdev_id, peer_addr, ret2);
6009 }
6010 goto exit;
6011 }
6012 }
6013
6014 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
6015
6016 spin_lock_bh(&ar->data_lock);
6017 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
6018 if (peer && cmd == SET_KEY)
6019 peer->keys[key->keyidx] = key;
6020 else if (peer && cmd == DISABLE_KEY)
6021 peer->keys[key->keyidx] = NULL;
6022 else if (peer == NULL)
6023 /* impossible unless FW goes crazy */
6024 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
6025 spin_unlock_bh(&ar->data_lock);
6026
6027 if (sta && sta->tdls)
6028 ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6029 WMI_PEER_AUTHORIZE, 1);
6030
6031 exit:
6032 mutex_unlock(&ar->conf_mutex);
6033 return ret;
6034 }
6035
6036 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
6037 struct ieee80211_vif *vif,
6038 int keyidx)
6039 {
6040 struct ath10k *ar = hw->priv;
6041 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6042 int ret;
6043
6044 mutex_lock(&arvif->ar->conf_mutex);
6045
6046 if (arvif->ar->state != ATH10K_STATE_ON)
6047 goto unlock;
6048
6049 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
6050 arvif->vdev_id, keyidx);
6051
6052 ret = ath10k_wmi_vdev_set_param(arvif->ar,
6053 arvif->vdev_id,
6054 arvif->ar->wmi.vdev_param->def_keyid,
6055 keyidx);
6056
6057 if (ret) {
6058 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
6059 arvif->vdev_id,
6060 ret);
6061 goto unlock;
6062 }
6063
6064 arvif->def_wep_key_idx = keyidx;
6065
6066 unlock:
6067 mutex_unlock(&arvif->ar->conf_mutex);
6068 }
6069
6070 static void ath10k_sta_rc_update_wk(struct work_struct *wk)
6071 {
6072 struct ath10k *ar;
6073 struct ath10k_vif *arvif;
6074 struct ath10k_sta *arsta;
6075 struct ieee80211_sta *sta;
6076 struct cfg80211_chan_def def;
6077 enum nl80211_band band;
6078 const u8 *ht_mcs_mask;
6079 const u16 *vht_mcs_mask;
6080 u32 changed, bw, nss, smps;
6081 int err;
6082
6083 arsta = container_of(wk, struct ath10k_sta, update_wk);
6084 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
6085 arvif = arsta->arvif;
6086 ar = arvif->ar;
6087
6088 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
6089 return;
6090
6091 band = def.chan->band;
6092 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
6093 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
6094
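	/* Take a snapshot of the pending updates under the data spinlock and
	 * apply them via WMI below with only conf_mutex held, so the spinlock
	 * is never held across firmware commands.
	 */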
6095 spin_lock_bh(&ar->data_lock);
6096
6097 changed = arsta->changed;
6098 arsta->changed = 0;
6099
6100 bw = arsta->bw;
6101 nss = arsta->nss;
6102 smps = arsta->smps;
6103
6104 spin_unlock_bh(&ar->data_lock);
6105
6106 mutex_lock(&ar->conf_mutex);
6107
6108 nss = max_t(u32, 1, nss);
6109 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
6110 ath10k_mac_max_vht_nss(vht_mcs_mask)));
6111
6112 if (changed & IEEE80211_RC_BW_CHANGED) {
6113 enum wmi_phy_mode mode;
6114
6115 mode = chan_to_phymode(&def);
6116 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
6117 sta->addr, bw, mode);
6118
6119 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6120 WMI_PEER_PHYMODE, mode);
6121 if (err) {
6122 ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
6123 sta->addr, mode, err);
6124 goto exit;
6125 }
6126
6127 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6128 WMI_PEER_CHAN_WIDTH, bw);
6129 if (err)
6130 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
6131 sta->addr, bw, err);
6132 }
6133
6134 if (changed & IEEE80211_RC_NSS_CHANGED) {
6135 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
6136 sta->addr, nss);
6137
6138 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6139 WMI_PEER_NSS, nss);
6140 if (err)
6141 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
6142 sta->addr, nss, err);
6143 }
6144
6145 if (changed & IEEE80211_RC_SMPS_CHANGED) {
6146 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
6147 sta->addr, smps);
6148
6149 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6150 WMI_PEER_SMPS_STATE, smps);
6151 if (err)
6152 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
6153 sta->addr, smps, err);
6154 }
6155
6156 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
6157 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
6158 sta->addr);
6159
6160 err = ath10k_station_assoc(ar, arvif->vif, sta, true);
6161 if (err)
6162 ath10k_warn(ar, "failed to reassociate station: %pM\n",
6163 sta->addr);
6164 }
6165
6166 exit:
6167 mutex_unlock(&ar->conf_mutex);
6168 }
6169
6170 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
6171 struct ieee80211_sta *sta)
6172 {
6173 struct ath10k *ar = arvif->ar;
6174
6175 lockdep_assert_held(&ar->conf_mutex);
6176
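	/* Peers created on a client (STA) vdev for its AP do not count
	 * against the firmware station limit; TDLS peers and peers on other
	 * vdev types do.
	 */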
6177 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
6178 return 0;
6179
6180 if (ar->num_stations >= ar->max_num_stations)
6181 return -ENOBUFS;
6182
6183 ar->num_stations++;
6184
6185 return 0;
6186 }
6187
6188 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
6189 struct ieee80211_sta *sta)
6190 {
6191 struct ath10k *ar = arvif->ar;
6192
6193 lockdep_assert_held(&ar->conf_mutex);
6194
6195 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
6196 return;
6197
6198 ar->num_stations--;
6199 }
6200
6201 static int ath10k_sta_state(struct ieee80211_hw *hw,
6202 struct ieee80211_vif *vif,
6203 struct ieee80211_sta *sta,
6204 enum ieee80211_sta_state old_state,
6205 enum ieee80211_sta_state new_state)
6206 {
6207 struct ath10k *ar = hw->priv;
6208 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6209 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6210 struct ath10k_peer *peer;
6211 int ret = 0;
6212 int i;
6213
6214 if (old_state == IEEE80211_STA_NOTEXIST &&
6215 new_state == IEEE80211_STA_NONE) {
6216 memset(arsta, 0, sizeof(*arsta));
6217 arsta->arvif = arvif;
6218 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
6219
6220 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
6221 ath10k_mac_txq_init(sta->txq[i]);
6222 }
6223
6224 /* cancel must be done outside the mutex to avoid deadlock */
6225 if ((old_state == IEEE80211_STA_NONE &&
6226 new_state == IEEE80211_STA_NOTEXIST))
6227 cancel_work_sync(&arsta->update_wk);
6228
6229 mutex_lock(&ar->conf_mutex);
6230
6231 if (old_state == IEEE80211_STA_NOTEXIST &&
6232 new_state == IEEE80211_STA_NONE) {
6233 /*
6234 * New station addition.
6235 */
6236 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
6237 u32 num_tdls_stations;
6238 u32 num_tdls_vifs;
6239
6240 ath10k_dbg(ar, ATH10K_DBG_MAC,
6241 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
6242 arvif->vdev_id, sta->addr,
6243 ar->num_stations + 1, ar->max_num_stations,
6244 ar->num_peers + 1, ar->max_num_peers);
6245
6246 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
6247 num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
6248
6249 if (sta->tdls) {
6250 if (num_tdls_stations >= ar->max_num_tdls_vdevs) {
6251 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
6252 arvif->vdev_id,
6253 ar->max_num_tdls_vdevs);
6254 ret = -ELNRNG;
6255 goto exit;
6256 }
6257 peer_type = WMI_PEER_TYPE_TDLS;
6258 }
6259
6260 ret = ath10k_mac_inc_num_stations(arvif, sta);
6261 if (ret) {
6262 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
6263 ar->max_num_stations);
6264 goto exit;
6265 }
6266
6267 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
6268 sta->addr, peer_type);
6269 if (ret) {
6270 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
6271 sta->addr, arvif->vdev_id, ret);
6272 ath10k_mac_dec_num_stations(arvif, sta);
6273 goto exit;
6274 }
6275
6276 spin_lock_bh(&ar->data_lock);
6277
6278 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
6279 if (!peer) {
6280 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
6281 vif->addr, arvif->vdev_id);
6282 spin_unlock_bh(&ar->data_lock);
6283 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6284 ath10k_mac_dec_num_stations(arvif, sta);
6285 ret = -ENOENT;
6286 goto exit;
6287 }
6288
6289 arsta->peer_id = find_first_bit(peer->peer_ids,
6290 ATH10K_MAX_NUM_PEER_IDS);
6291
6292 spin_unlock_bh(&ar->data_lock);
6293
6294 if (!sta->tdls)
6295 goto exit;
6296
6297 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6298 WMI_TDLS_ENABLE_ACTIVE);
6299 if (ret) {
6300 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
6301 arvif->vdev_id, ret);
6302 ath10k_peer_delete(ar, arvif->vdev_id,
6303 sta->addr);
6304 ath10k_mac_dec_num_stations(arvif, sta);
6305 goto exit;
6306 }
6307
6308 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6309 WMI_TDLS_PEER_STATE_PEERING);
6310 if (ret) {
6311 ath10k_warn(ar,
6312 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
6313 sta->addr, arvif->vdev_id, ret);
6314 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6315 ath10k_mac_dec_num_stations(arvif, sta);
6316
6317 if (num_tdls_stations != 0)
6318 goto exit;
6319 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6320 WMI_TDLS_DISABLE);
6321 }
6322 } else if ((old_state == IEEE80211_STA_NONE &&
6323 new_state == IEEE80211_STA_NOTEXIST)) {
6324 /*
6325 * Existing station deletion.
6326 */
6327 ath10k_dbg(ar, ATH10K_DBG_MAC,
6328 "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
6329 arvif->vdev_id, sta->addr, sta);
6330
6331 if (sta->tdls) {
6332 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
6333 sta,
6334 WMI_TDLS_PEER_STATE_TEARDOWN);
6335 if (ret)
6336 ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
6337 sta->addr,
6338 WMI_TDLS_PEER_STATE_TEARDOWN, ret);
6339 }
6340
6341 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6342 if (ret)
6343 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
6344 sta->addr, arvif->vdev_id, ret);
6345
6346 ath10k_mac_dec_num_stations(arvif, sta);
6347
6348 spin_lock_bh(&ar->data_lock);
6349 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
6350 peer = ar->peer_map[i];
6351 if (!peer)
6352 continue;
6353
6354 if (peer->sta == sta) {
6355 ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
6356 sta->addr, peer, i, arvif->vdev_id);
6357 peer->sta = NULL;
6358
6359 /* Clean up the peer object as well since we
6360 * must have failed to do this above.
6361 */
6362 list_del(&peer->list);
6363 ar->peer_map[i] = NULL;
6364 kfree(peer);
6365 ar->num_peers--;
6366 }
6367 }
6368 spin_unlock_bh(&ar->data_lock);
6369
6370 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
6371 ath10k_mac_txq_unref(ar, sta->txq[i]);
6372
6373 if (!sta->tdls)
6374 goto exit;
6375
6376 if (ath10k_mac_tdls_vif_stations_count(hw, vif))
6377 goto exit;
6378
6379 /* This was the last tdls peer in current vif */
6380 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6381 WMI_TDLS_DISABLE);
6382 if (ret) {
6383 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
6384 arvif->vdev_id, ret);
6385 }
6386 } else if (old_state == IEEE80211_STA_AUTH &&
6387 new_state == IEEE80211_STA_ASSOC &&
6388 (vif->type == NL80211_IFTYPE_AP ||
6389 vif->type == NL80211_IFTYPE_MESH_POINT ||
6390 vif->type == NL80211_IFTYPE_ADHOC)) {
6391 /*
6392 * New association.
6393 */
6394 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
6395 sta->addr);
6396
6397 ret = ath10k_station_assoc(ar, vif, sta, false);
6398 if (ret)
6399 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
6400 sta->addr, arvif->vdev_id, ret);
6401 } else if (old_state == IEEE80211_STA_ASSOC &&
6402 new_state == IEEE80211_STA_AUTHORIZED &&
6403 sta->tdls) {
6404 /*
6405 * Tdls station authorized.
6406 */
6407 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
6408 sta->addr);
6409
6410 ret = ath10k_station_assoc(ar, vif, sta, false);
6411 if (ret) {
6412 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
6413 sta->addr, arvif->vdev_id, ret);
6414 goto exit;
6415 }
6416
6417 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6418 WMI_TDLS_PEER_STATE_CONNECTED);
6419 if (ret)
6420 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
6421 sta->addr, arvif->vdev_id, ret);
6422 } else if (old_state == IEEE80211_STA_ASSOC &&
6423 new_state == IEEE80211_STA_AUTH &&
6424 (vif->type == NL80211_IFTYPE_AP ||
6425 vif->type == NL80211_IFTYPE_MESH_POINT ||
6426 vif->type == NL80211_IFTYPE_ADHOC)) {
6427 /*
6428 * Disassociation.
6429 */
6430 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
6431 sta->addr);
6432
6433 ret = ath10k_station_disassoc(ar, vif, sta);
6434 if (ret)
6435 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
6436 sta->addr, arvif->vdev_id, ret);
6437 }
6438 exit:
6439 mutex_unlock(&ar->conf_mutex);
6440 return ret;
6441 }
6442
6443 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
6444 u16 ac, bool enable)
6445 {
6446 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6447 struct wmi_sta_uapsd_auto_trig_arg arg = {};
6448 u32 prio = 0, acc = 0;
6449 u32 value = 0;
6450 int ret = 0;
6451
6452 lockdep_assert_held(&ar->conf_mutex);
6453
6454 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
6455 return 0;
6456
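	/* Map the mac80211 AC to the corresponding WMM AC index (acc), a
	 * representative 802.1D user priority (prio) for the auto-trigger
	 * setup below, and the matching UAPSD delivery/trigger enable bits.
	 */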
6457 switch (ac) {
6458 case IEEE80211_AC_VO:
6459 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
6460 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
6461 prio = 7;
6462 acc = 3;
6463 break;
6464 case IEEE80211_AC_VI:
6465 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
6466 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
6467 prio = 5;
6468 acc = 2;
6469 break;
6470 case IEEE80211_AC_BE:
6471 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
6472 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
6473 prio = 2;
6474 acc = 1;
6475 break;
6476 case IEEE80211_AC_BK:
6477 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
6478 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
6479 prio = 0;
6480 acc = 0;
6481 break;
6482 }
6483
6484 if (enable)
6485 arvif->u.sta.uapsd |= value;
6486 else
6487 arvif->u.sta.uapsd &= ~value;
6488
6489 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6490 WMI_STA_PS_PARAM_UAPSD,
6491 arvif->u.sta.uapsd);
6492 if (ret) {
6493 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
6494 goto exit;
6495 }
6496
6497 if (arvif->u.sta.uapsd)
6498 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
6499 else
6500 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
6501
6502 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6503 WMI_STA_PS_PARAM_RX_WAKE_POLICY,
6504 value);
6505 if (ret)
6506 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
6507
6508 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
6509 if (ret) {
6510 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
6511 arvif->vdev_id, ret);
6512 return ret;
6513 }
6514
6515 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
6516 if (ret) {
6517 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
6518 arvif->vdev_id, ret);
6519 return ret;
6520 }
6521
6522 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
6523 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
6524 		/* Only userspace can make an educated decision on when to send
6525 		 * a trigger frame. The following effectively disables U-APSD
6526 * autotrigger in firmware (which is enabled by default
6527 * provided the autotrigger service is available).
6528 */
6529
6530 arg.wmm_ac = acc;
6531 arg.user_priority = prio;
6532 arg.service_interval = 0;
6533 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6534 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6535
6536 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
6537 arvif->bssid, &arg, 1);
6538 if (ret) {
6539 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
6540 ret);
6541 return ret;
6542 }
6543 }
6544
6545 exit:
6546 return ret;
6547 }
6548
6549 static int ath10k_conf_tx(struct ieee80211_hw *hw,
6550 struct ieee80211_vif *vif, u16 ac,
6551 const struct ieee80211_tx_queue_params *params)
6552 {
6553 struct ath10k *ar = hw->priv;
6554 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6555 struct wmi_wmm_params_arg *p = NULL;
6556 int ret;
6557
6558 mutex_lock(&ar->conf_mutex);
6559
6560 switch (ac) {
6561 case IEEE80211_AC_VO:
6562 p = &arvif->wmm_params.ac_vo;
6563 break;
6564 case IEEE80211_AC_VI:
6565 p = &arvif->wmm_params.ac_vi;
6566 break;
6567 case IEEE80211_AC_BE:
6568 p = &arvif->wmm_params.ac_be;
6569 break;
6570 case IEEE80211_AC_BK:
6571 p = &arvif->wmm_params.ac_bk;
6572 break;
6573 }
6574
6575 if (WARN_ON(!p)) {
6576 ret = -EINVAL;
6577 goto exit;
6578 }
6579
6580 p->cwmin = params->cw_min;
6581 p->cwmax = params->cw_max;
6582 p->aifs = params->aifs;
6583
6584 /*
6585 * The channel time duration programmed in the HW is in absolute
6586 * microseconds, while mac80211 gives the txop in units of
6587 * 32 microseconds.
6588 */
6589 p->txop = params->txop * 32;
6590
6591 if (ar->wmi.ops->gen_vdev_wmm_conf) {
6592 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
6593 &arvif->wmm_params);
6594 if (ret) {
6595 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
6596 arvif->vdev_id, ret);
6597 goto exit;
6598 }
6599 } else {
6600 /* This won't work well with multi-interface cases but it's
6601 * better than nothing.
6602 */
6603 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
6604 if (ret) {
6605 ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
6606 goto exit;
6607 }
6608 }
6609
6610 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
6611 if (ret)
6612 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
6613
6614 exit:
6615 mutex_unlock(&ar->conf_mutex);
6616 return ret;
6617 }
6618
6619 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
6620
6621 static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
6622 struct ieee80211_vif *vif,
6623 struct ieee80211_channel *chan,
6624 int duration,
6625 enum ieee80211_roc_type type)
6626 {
6627 struct ath10k *ar = hw->priv;
6628 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6629 struct wmi_start_scan_arg arg;
6630 int ret = 0;
6631 u32 scan_time_msec;
6632
6633 mutex_lock(&ar->conf_mutex);
6634
6635 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) {
6636 ret = -EBUSY;
6637 goto exit;
6638 }
6639
6640 spin_lock_bh(&ar->data_lock);
6641 switch (ar->scan.state) {
6642 case ATH10K_SCAN_IDLE:
6643 reinit_completion(&ar->scan.started);
6644 reinit_completion(&ar->scan.completed);
6645 reinit_completion(&ar->scan.on_channel);
6646 ar->scan.state = ATH10K_SCAN_STARTING;
6647 ar->scan.is_roc = true;
6648 ar->scan.vdev_id = arvif->vdev_id;
6649 ar->scan.roc_freq = chan->center_freq;
6650 ar->scan.roc_notify = true;
6651 ret = 0;
6652 break;
6653 case ATH10K_SCAN_STARTING:
6654 case ATH10K_SCAN_RUNNING:
6655 case ATH10K_SCAN_ABORTING:
6656 ret = -EBUSY;
6657 break;
6658 }
6659 spin_unlock_bh(&ar->data_lock);
6660
6661 if (ret)
6662 goto exit;
6663
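	/* Dwell for up to twice the advertised maximum ROC duration; the
	 * delayed scan timeout work queued below is scheduled for the
	 * requested duration.
	 */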
6664 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
6665
6666 memset(&arg, 0, sizeof(arg));
6667 ath10k_wmi_start_scan_init(ar, &arg);
6668 arg.vdev_id = arvif->vdev_id;
6669 arg.scan_id = ATH10K_SCAN_ID;
6670 arg.n_channels = 1;
6671 arg.channels[0] = chan->center_freq;
6672 arg.dwell_time_active = scan_time_msec;
6673 arg.dwell_time_passive = scan_time_msec;
6674 arg.max_scan_time = scan_time_msec;
6675 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
6676 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
6677 arg.burst_duration_ms = duration;
6678
6679 ret = ath10k_start_scan(ar, &arg);
6680 if (ret) {
6681 ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
6682 spin_lock_bh(&ar->data_lock);
6683 ar->scan.state = ATH10K_SCAN_IDLE;
6684 spin_unlock_bh(&ar->data_lock);
6685 goto exit;
6686 }
6687
6688 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
6689 if (ret == 0) {
6690 ath10k_warn(ar, "failed to switch to channel for roc scan\n");
6691
6692 ret = ath10k_scan_stop(ar);
6693 if (ret)
6694 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
6695
6696 ret = -ETIMEDOUT;
6697 goto exit;
6698 }
6699
6700 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
6701 msecs_to_jiffies(duration));
6702
6703 ret = 0;
6704 exit:
6705 mutex_unlock(&ar->conf_mutex);
6706 return ret;
6707 }
6708
6709 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
6710 {
6711 struct ath10k *ar = hw->priv;
6712
6713 mutex_lock(&ar->conf_mutex);
6714
6715 spin_lock_bh(&ar->data_lock);
6716 ar->scan.roc_notify = false;
6717 spin_unlock_bh(&ar->data_lock);
6718
6719 ath10k_scan_abort(ar);
6720
6721 mutex_unlock(&ar->conf_mutex);
6722
6723 cancel_delayed_work_sync(&ar->scan.timeout);
6724
6725 return 0;
6726 }
6727
6728 /*
6729 * Both RTS and Fragmentation threshold are interface-specific
6730 * in ath10k, but device-specific in mac80211.
6731 */
6732
6733 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
6734 {
6735 struct ath10k *ar = hw->priv;
6736 struct ath10k_vif *arvif;
6737 int ret = 0;
6738
6739 mutex_lock(&ar->conf_mutex);
6740 list_for_each_entry(arvif, &ar->arvifs, list) {
6741 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
6742 arvif->vdev_id, value);
6743
6744 ret = ath10k_mac_set_rts(arvif, value);
6745 if (ret) {
6746 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
6747 arvif->vdev_id, ret);
6748 break;
6749 }
6750 }
6751 mutex_unlock(&ar->conf_mutex);
6752
6753 return ret;
6754 }
6755
6756 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
6757 {
6758 /* Even though there's a WMI enum for fragmentation threshold no known
6759 	 * firmware actually implements it. Moreover it is not possible to
6760 	 * delegate frame fragmentation to mac80211 because firmware clears the
6761 	 * "more fragments" bit in frame control, making it impossible for remote
6762 * devices to reassemble frames.
6763 *
6764 * Hence implement a dummy callback just to say fragmentation isn't
6765 * supported. This effectively prevents mac80211 from doing frame
6766 * fragmentation in software.
6767 */
6768 return -EOPNOTSUPP;
6769 }
6770
6771 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6772 u32 queues, bool drop)
6773 {
6774 struct ath10k *ar = hw->priv;
6775 bool skip;
6776 long time_left;
6777
6778 	/* mac80211 doesn't care if we really xmit queued frames or not;
6779 	 * we'll collect those frames either way if we stop/delete vdevs.
6780 */
6781 if (drop)
6782 return;
6783
6784 mutex_lock(&ar->conf_mutex);
6785
6786 if (ar->state == ATH10K_STATE_WEDGED)
6787 goto skip;
6788
6789 time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
6790 bool empty;
6791
6792 spin_lock_bh(&ar->htt.tx_lock);
6793 empty = (ar->htt.num_pending_tx == 0);
6794 spin_unlock_bh(&ar->htt.tx_lock);
6795
6796 skip = (ar->state == ATH10K_STATE_WEDGED) ||
6797 test_bit(ATH10K_FLAG_CRASH_FLUSH,
6798 &ar->dev_flags);
6799
6800 (empty || skip);
6801 }), ATH10K_FLUSH_TIMEOUT_HZ);
6802
6803 if (time_left == 0 || skip)
6804 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
6805 skip, ar->state, time_left);
6806
6807 skip:
6808 mutex_unlock(&ar->conf_mutex);
6809 }
6810
6811 /* TODO: Implement this function properly
6812 * For now it is needed to reply to Probe Requests in IBSS mode.
6813  * Probably we need this information from FW.
6814 */
6815 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
6816 {
6817 return 1;
6818 }
6819
6820 static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
6821 enum ieee80211_reconfig_type reconfig_type)
6822 {
6823 struct ath10k *ar = hw->priv;
6824
6825 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
6826 return;
6827
6828 mutex_lock(&ar->conf_mutex);
6829
6830 /* If device failed to restart it will be in a different state, e.g.
6831 * ATH10K_STATE_WEDGED
6832 */
6833 if (ar->state == ATH10K_STATE_RESTARTED) {
6834 ath10k_info(ar, "device successfully recovered\n");
6835 ar->state = ATH10K_STATE_ON;
6836 ieee80211_wake_queues(ar->hw);
6837 }
6838
6839 mutex_unlock(&ar->conf_mutex);
6840 }
6841
6842 static void
6843 ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
6844 struct ieee80211_channel *channel)
6845 {
6846 int ret;
6847 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
6848
6849 lockdep_assert_held(&ar->conf_mutex);
6850
6851 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
6852 (ar->rx_channel != channel))
6853 return;
6854
6855 if (ar->scan.state != ATH10K_SCAN_IDLE) {
6856 ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n");
6857 return;
6858 }
6859
6860 reinit_completion(&ar->bss_survey_done);
6861
6862 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
6863 if (ret) {
6864 ath10k_warn(ar, "failed to send pdev bss chan info request\n");
6865 return;
6866 }
6867
6868 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
6869 if (!ret) {
6870 ath10k_warn(ar, "bss channel survey timed out\n");
6871 return;
6872 }
6873 }
6874
6875 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
6876 struct survey_info *survey)
6877 {
6878 struct ath10k *ar = hw->priv;
6879 struct ieee80211_supported_band *sband;
6880 struct survey_info *ar_survey = &ar->survey[idx];
6881 int ret = 0;
6882
6883 mutex_lock(&ar->conf_mutex);
6884
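	/* idx enumerates the 2 GHz channels first, followed by the 5 GHz
	 * channels, so fold it into the right band's channel array.
	 */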
6885 sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
6886 if (sband && idx >= sband->n_channels) {
6887 idx -= sband->n_channels;
6888 sband = NULL;
6889 }
6890
6891 if (!sband)
6892 sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
6893
6894 if (!sband || idx >= sband->n_channels) {
6895 ret = -ENOENT;
6896 goto exit;
6897 }
6898
6899 ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
6900
6901 spin_lock_bh(&ar->data_lock);
6902 memcpy(survey, ar_survey, sizeof(*survey));
6903 spin_unlock_bh(&ar->data_lock);
6904
6905 survey->channel = &sband->channels[idx];
6906
6907 if (ar->rx_channel == survey->channel)
6908 survey->filled |= SURVEY_INFO_IN_USE;
6909
6910 exit:
6911 mutex_unlock(&ar->conf_mutex);
6912 return ret;
6913 }
6914
6915 static bool
6916 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6917 enum nl80211_band band,
6918 const struct cfg80211_bitrate_mask *mask)
6919 {
6920 int num_rates = 0;
6921 int i;
6922
6923 num_rates += hweight32(mask->control[band].legacy);
6924
6925 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
6926 num_rates += hweight8(mask->control[band].ht_mcs[i]);
6927
6928 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
6929 num_rates += hweight16(mask->control[band].vht_mcs[i]);
6930
6931 return num_rates == 1;
6932 }
6933
6934 static bool
6935 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
6936 enum nl80211_band band,
6937 const struct cfg80211_bitrate_mask *mask,
6938 int *nss)
6939 {
6940 struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6941 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
6942 u8 ht_nss_mask = 0;
6943 u8 vht_nss_mask = 0;
6944 int i;
6945
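	/* A mask maps to a single NSS only if, for both HT and VHT, each
	 * enabled spatial stream allows every MCS the hardware supports and
	 * the enabled streams form a contiguous set starting at stream 1.
	 */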
6946 if (mask->control[band].legacy)
6947 return false;
6948
6949 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6950 if (mask->control[band].ht_mcs[i] == 0)
6951 continue;
6952 else if (mask->control[band].ht_mcs[i] ==
6953 sband->ht_cap.mcs.rx_mask[i])
6954 ht_nss_mask |= BIT(i);
6955 else
6956 return false;
6957 }
6958
6959 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6960 if (mask->control[band].vht_mcs[i] == 0)
6961 continue;
6962 else if (mask->control[band].vht_mcs[i] ==
6963 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
6964 vht_nss_mask |= BIT(i);
6965 else
6966 return false;
6967 }
6968
6969 if (ht_nss_mask != vht_nss_mask)
6970 return false;
6971
6972 if (ht_nss_mask == 0)
6973 return false;
6974
6975 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
6976 return false;
6977
6978 *nss = fls(ht_nss_mask);
6979
6980 return true;
6981 }
6982
6983 static int
6984 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
6985 enum nl80211_band band,
6986 const struct cfg80211_bitrate_mask *mask,
6987 u8 *rate, u8 *nss)
6988 {
6989 int rate_idx;
6990 int i;
6991 u16 bitrate;
6992 u8 preamble;
6993 u8 hw_rate;
6994
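	/* WMI rate codes pack the preamble in bits [7:6], (nss - 1) in bits
	 * [5:4] and the hardware rate in bits [3:0]; the HT/VHT cases below
	 * use the same layout with the MCS in the low bits.
	 */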
6995 if (hweight32(mask->control[band].legacy) == 1) {
6996 rate_idx = ffs(mask->control[band].legacy) - 1;
6997
6998 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
6999 rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
7000
7001 hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value;
7002 bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate;
7003
7004 if (ath10k_mac_bitrate_is_cck(bitrate))
7005 preamble = WMI_RATE_PREAMBLE_CCK;
7006 else
7007 preamble = WMI_RATE_PREAMBLE_OFDM;
7008
7009 *nss = 1;
7010 *rate = preamble << 6 |
7011 (*nss - 1) << 4 |
7012 hw_rate << 0;
7013
7014 return 0;
7015 }
7016
7017 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
7018 if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
7019 *nss = i + 1;
7020 *rate = WMI_RATE_PREAMBLE_HT << 6 |
7021 (*nss - 1) << 4 |
7022 (ffs(mask->control[band].ht_mcs[i]) - 1);
7023
7024 return 0;
7025 }
7026 }
7027
7028 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
7029 if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
7030 *nss = i + 1;
7031 *rate = WMI_RATE_PREAMBLE_VHT << 6 |
7032 (*nss - 1) << 4 |
7033 (ffs(mask->control[band].vht_mcs[i]) - 1);
7034
7035 return 0;
7036 }
7037 }
7038
7039 return -EINVAL;
7040 }
7041
7042 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
7043 u8 rate, u8 nss, u8 sgi, u8 ldpc)
7044 {
7045 struct ath10k *ar = arvif->ar;
7046 u32 vdev_param;
7047 int ret;
7048
7049 lockdep_assert_held(&ar->conf_mutex);
7050
7051 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
7052 arvif->vdev_id, rate, nss, sgi);
7053
7054 vdev_param = ar->wmi.vdev_param->fixed_rate;
7055 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
7056 if (ret) {
7057 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
7058 rate, ret);
7059 return ret;
7060 }
7061
7062 vdev_param = ar->wmi.vdev_param->nss;
7063 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
7064 if (ret) {
7065 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
7066 return ret;
7067 }
7068
7069 vdev_param = ar->wmi.vdev_param->sgi;
7070 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
7071 if (ret) {
7072 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
7073 return ret;
7074 }
7075
7076 vdev_param = ar->wmi.vdev_param->ldpc;
7077 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
7078 if (ret) {
7079 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
7080 return ret;
7081 }
7082
7083 return 0;
7084 }
7085
7086 static bool
7087 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
7088 enum nl80211_band band,
7089 const struct cfg80211_bitrate_mask *mask)
7090 {
7091 int i;
7092 u16 vht_mcs;
7093
7094 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
7095 * to express all VHT MCS rate masks. Effectively only the following
7096 * ranges can be used: none, 0-7, 0-8 and 0-9.
7097 */
7098 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
7099 vht_mcs = mask->control[band].vht_mcs[i];
7100
7101 switch (vht_mcs) {
7102 case 0:
7103 case BIT(8) - 1:
7104 case BIT(9) - 1:
7105 case BIT(10) - 1:
7106 break;
7107 default:
7108 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
7109 return false;
7110 }
7111 }
7112
7113 return true;
7114 }
7115
7116 static void ath10k_mac_set_bitrate_mask_iter(void *data,
7117 struct ieee80211_sta *sta)
7118 {
7119 struct ath10k_vif *arvif = data;
7120 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
7121 struct ath10k *ar = arvif->ar;
7122
7123 if (arsta->arvif != arvif)
7124 return;
7125
7126 spin_lock_bh(&ar->data_lock);
7127 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
7128 spin_unlock_bh(&ar->data_lock);
7129
7130 ieee80211_queue_work(ar->hw, &arsta->update_wk);
7131 }
7132
7133 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
7134 struct ieee80211_vif *vif,
7135 const struct cfg80211_bitrate_mask *mask)
7136 {
7137 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7138 struct cfg80211_chan_def def;
7139 struct ath10k *ar = arvif->ar;
7140 enum nl80211_band band;
7141 const u8 *ht_mcs_mask;
7142 const u16 *vht_mcs_mask;
7143 u8 rate;
7144 u8 nss;
7145 u8 sgi;
7146 u8 ldpc;
7147 int single_nss;
7148 int ret;
7149
7150 if (ath10k_mac_vif_chan(vif, &def))
7151 return -EPERM;
7152
7153 band = def.chan->band;
7154 ht_mcs_mask = mask->control[band].ht_mcs;
7155 vht_mcs_mask = mask->control[band].vht_mcs;
7156 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
7157
7158 sgi = mask->control[band].gi;
7159 if (sgi == NL80211_TXRATE_FORCE_LGI)
7160 return -EINVAL;
7161
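	/* Three cases: a single fixed rate is programmed directly; a single
	 * NSS restriction is programmed as a fixed NSS with no fixed rate;
	 * anything else is stored in arvif->bitrate_mask and applied by
	 * re-associating every station on this vif via the rc update worker.
	 */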
7162 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
7163 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
7164 &rate, &nss);
7165 if (ret) {
7166 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
7167 arvif->vdev_id, ret);
7168 return ret;
7169 }
7170 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
7171 &single_nss)) {
7172 rate = WMI_FIXED_RATE_NONE;
7173 nss = single_nss;
7174 } else {
7175 rate = WMI_FIXED_RATE_NONE;
7176 nss = min(ar->num_rf_chains,
7177 max(ath10k_mac_max_ht_nss(ht_mcs_mask),
7178 ath10k_mac_max_vht_nss(vht_mcs_mask)));
7179
7180 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
7181 return -EINVAL;
7182
7183 mutex_lock(&ar->conf_mutex);
7184
7185 arvif->bitrate_mask = *mask;
7186 ieee80211_iterate_stations_atomic(ar->hw,
7187 ath10k_mac_set_bitrate_mask_iter,
7188 arvif);
7189
7190 mutex_unlock(&ar->conf_mutex);
7191 }
7192
7193 mutex_lock(&ar->conf_mutex);
7194
7195 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
7196 if (ret) {
7197 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
7198 arvif->vdev_id, ret);
7199 goto exit;
7200 }
7201
7202 exit:
7203 mutex_unlock(&ar->conf_mutex);
7204
7205 return ret;
7206 }
7207
7208 static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
7209 struct ieee80211_vif *vif,
7210 struct ieee80211_sta *sta,
7211 u32 changed)
7212 {
7213 struct ath10k *ar = hw->priv;
7214 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
7215 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7216 struct ath10k_peer *peer;
7217 u32 bw, smps;
7218
7219 spin_lock_bh(&ar->data_lock);
7220
7221 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
7222 if (!peer) {
7223 spin_unlock_bh(&ar->data_lock);
7224 ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n",
7225 sta->addr, arvif->vdev_id);
7226 return;
7227 }
7228
7229 ath10k_dbg(ar, ATH10K_DBG_MAC,
7230 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
7231 sta->addr, changed, sta->bandwidth, sta->rx_nss,
7232 sta->smps_mode);
7233
7234 if (changed & IEEE80211_RC_BW_CHANGED) {
7235 bw = WMI_PEER_CHWIDTH_20MHZ;
7236
7237 switch (sta->bandwidth) {
7238 case IEEE80211_STA_RX_BW_20:
7239 bw = WMI_PEER_CHWIDTH_20MHZ;
7240 break;
7241 case IEEE80211_STA_RX_BW_40:
7242 bw = WMI_PEER_CHWIDTH_40MHZ;
7243 break;
7244 case IEEE80211_STA_RX_BW_80:
7245 bw = WMI_PEER_CHWIDTH_80MHZ;
7246 break;
7247 case IEEE80211_STA_RX_BW_160:
7248 bw = WMI_PEER_CHWIDTH_160MHZ;
7249 break;
7250 default:
7251 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
7252 sta->bandwidth, sta->addr);
7253 bw = WMI_PEER_CHWIDTH_20MHZ;
7254 break;
7255 }
7256
7257 arsta->bw = bw;
7258 }
7259
7260 if (changed & IEEE80211_RC_NSS_CHANGED)
7261 arsta->nss = sta->rx_nss;
7262
7263 if (changed & IEEE80211_RC_SMPS_CHANGED) {
7264 smps = WMI_PEER_SMPS_PS_NONE;
7265
7266 switch (sta->smps_mode) {
7267 case IEEE80211_SMPS_AUTOMATIC:
7268 case IEEE80211_SMPS_OFF:
7269 smps = WMI_PEER_SMPS_PS_NONE;
7270 break;
7271 case IEEE80211_SMPS_STATIC:
7272 smps = WMI_PEER_SMPS_STATIC;
7273 break;
7274 case IEEE80211_SMPS_DYNAMIC:
7275 smps = WMI_PEER_SMPS_DYNAMIC;
7276 break;
7277 case IEEE80211_SMPS_NUM_MODES:
7278 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
7279 sta->smps_mode, sta->addr);
7280 smps = WMI_PEER_SMPS_PS_NONE;
7281 break;
7282 }
7283
7284 arsta->smps = smps;
7285 }
7286
7287 arsta->changed |= changed;
7288
7289 spin_unlock_bh(&ar->data_lock);
7290
7291 ieee80211_queue_work(hw, &arsta->update_wk);
7292 }
7293
7294 static void ath10k_offset_tsf(struct ieee80211_hw *hw,
7295 struct ieee80211_vif *vif, s64 tsf_offset)
7296 {
7297 struct ath10k *ar = hw->priv;
7298 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7299 u32 offset, vdev_param;
7300 int ret;
7301
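	/* Firmware takes the TSF adjustment as an unsigned magnitude, with
	 * separate vdev parameters for incrementing and decrementing.
	 */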
7302 if (tsf_offset < 0) {
7303 vdev_param = ar->wmi.vdev_param->dec_tsf;
7304 offset = -tsf_offset;
7305 } else {
7306 vdev_param = ar->wmi.vdev_param->inc_tsf;
7307 offset = tsf_offset;
7308 }
7309
7310 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
7311 vdev_param, offset);
7312
7313 if (ret && ret != -EOPNOTSUPP)
7314 ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n",
7315 offset, vdev_param, ret);
7316 }
7317
7318 static int ath10k_ampdu_action(struct ieee80211_hw *hw,
7319 struct ieee80211_vif *vif,
7320 struct ieee80211_ampdu_params *params)
7321 {
7322 struct ath10k *ar = hw->priv;
7323 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7324 struct ieee80211_sta *sta = params->sta;
7325 enum ieee80211_ampdu_mlme_action action = params->action;
7326 u16 tid = params->tid;
7327
7328 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
7329 arvif->vdev_id, sta->addr, tid, action);
7330
7331 switch (action) {
7332 case IEEE80211_AMPDU_RX_START:
7333 case IEEE80211_AMPDU_RX_STOP:
7334 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
7335 * creation/removal. Do we need to verify this?
7336 */
7337 return 0;
7338 case IEEE80211_AMPDU_TX_START:
7339 case IEEE80211_AMPDU_TX_STOP_CONT:
7340 case IEEE80211_AMPDU_TX_STOP_FLUSH:
7341 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
7342 case IEEE80211_AMPDU_TX_OPERATIONAL:
7343 /* Firmware offloads Tx aggregation entirely so deny mac80211
7344 * Tx aggregation requests.
7345 */
7346 return -EOPNOTSUPP;
7347 }
7348
7349 return -EINVAL;
7350 }
7351
7352 static void
7353 ath10k_mac_update_rx_channel(struct ath10k *ar,
7354 struct ieee80211_chanctx_conf *ctx,
7355 struct ieee80211_vif_chanctx_switch *vifs,
7356 int n_vifs)
7357 {
7358 struct cfg80211_chan_def *def = NULL;
7359
7360 /* Both locks are required because ar->rx_channel is modified. This
7361 * allows readers to hold either lock.
7362 */
7363 lockdep_assert_held(&ar->conf_mutex);
7364 lockdep_assert_held(&ar->data_lock);
7365
7366 WARN_ON(ctx && vifs);
7367 WARN_ON(vifs && !n_vifs);
7368
7369 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
7370 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
7371 * ppdu on Rx may reduce performance on low-end systems. It should be
7372 	 * possible to make tables/hashmaps to speed the lookup up (be wary of
7373 * cpu data cache lines though regarding sizes) but to keep the initial
7374 * implementation simple and less intrusive fallback to the slow lookup
7375 * only for multi-channel cases. Single-channel cases will remain to
7376 	 * use the old channel derivation and thus performance should not be
7377 * affected much.
7378 */
7379 rcu_read_lock();
7380 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
7381 ieee80211_iter_chan_contexts_atomic(ar->hw,
7382 ath10k_mac_get_any_chandef_iter,
7383 &def);
7384
7385 if (vifs)
7386 def = &vifs[0].new_ctx->def;
7387
7388 ar->rx_channel = def->chan;
7389 } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
7390 (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
7391 		/* During a driver restart due to a firmware assert, mac80211
7392 		 * already has a valid channel context for the given radio, so
7393 		 * channel context iteration returns num_chanctx > 0. Fix up
7394 		 * rx_channel while the restart is in progress.
7395 */
7396 ar->rx_channel = ctx->def.chan;
7397 } else {
7398 ar->rx_channel = NULL;
7399 }
7400 rcu_read_unlock();
7401 }
7402
7403 static void
7404 ath10k_mac_update_vif_chan(struct ath10k *ar,
7405 struct ieee80211_vif_chanctx_switch *vifs,
7406 int n_vifs)
7407 {
7408 struct ath10k_vif *arvif;
7409 int ret;
7410 int i;
7411
7412 lockdep_assert_held(&ar->conf_mutex);
7413
7414 /* First stop monitor interface. Some FW versions crash if there's a
7415 * lone monitor interface.
7416 */
7417 if (ar->monitor_started)
7418 ath10k_monitor_stop(ar);
7419
7420 for (i = 0; i < n_vifs; i++) {
7421 arvif = (void *)vifs[i].vif->drv_priv;
7422
7423 ath10k_dbg(ar, ATH10K_DBG_MAC,
7424 "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
7425 arvif->vdev_id,
7426 vifs[i].old_ctx->def.chan->center_freq,
7427 vifs[i].new_ctx->def.chan->center_freq,
7428 vifs[i].old_ctx->def.width,
7429 vifs[i].new_ctx->def.width);
7430
7431 if (WARN_ON(!arvif->is_started))
7432 continue;
7433
7434 if (WARN_ON(!arvif->is_up))
7435 continue;
7436
7437 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7438 if (ret) {
7439 ath10k_warn(ar, "failed to down vdev %d: %d\n",
7440 arvif->vdev_id, ret);
7441 continue;
7442 }
7443 }
7444
7445 /* All relevant vdevs are downed and associated channel resources
7446 * should be available for the channel switch now.
7447 */
7448
7449 spin_lock_bh(&ar->data_lock);
7450 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
7451 spin_unlock_bh(&ar->data_lock);
7452
7453 for (i = 0; i < n_vifs; i++) {
7454 arvif = (void *)vifs[i].vif->drv_priv;
7455
7456 if (WARN_ON(!arvif->is_started))
7457 continue;
7458
7459 if (WARN_ON(!arvif->is_up))
7460 continue;
7461
7462 ret = ath10k_mac_setup_bcn_tmpl(arvif);
7463 if (ret)
7464 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
7465 ret);
7466
7467 ret = ath10k_mac_setup_prb_tmpl(arvif);
7468 if (ret)
7469 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
7470 ret);
7471
7472 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
7473 if (ret) {
7474 ath10k_warn(ar, "failed to restart vdev %d: %d\n",
7475 arvif->vdev_id, ret);
7476 continue;
7477 }
7478
7479 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
7480 arvif->bssid);
7481 if (ret) {
7482 ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
7483 arvif->vdev_id, ret);
7484 continue;
7485 }
7486 }
7487
7488 ath10k_monitor_recalc(ar);
7489 }
7490
static int
ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
			  struct ieee80211_chanctx_conf *ctx)
{
	struct ath10k *ar = hw->priv;

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "mac chanctx add freq %hu width %d ptr %pK\n",
		   ctx->def.chan->center_freq, ctx->def.width, ctx);

	mutex_lock(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
	spin_unlock_bh(&ar->data_lock);

	ath10k_recalc_radar_detection(ar);
	ath10k_monitor_recalc(ar);

	mutex_unlock(&ar->conf_mutex);

	return 0;
}

static void
ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
			     struct ieee80211_chanctx_conf *ctx)
{
	struct ath10k *ar = hw->priv;

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "mac chanctx remove freq %hu width %d ptr %pK\n",
		   ctx->def.chan->center_freq, ctx->def.width, ctx);

	mutex_lock(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
	spin_unlock_bh(&ar->data_lock);

	ath10k_recalc_radar_detection(ar);
	ath10k_monitor_recalc(ar);

	mutex_unlock(&ar->conf_mutex);
}

struct ath10k_mac_change_chanctx_arg {
	struct ieee80211_chanctx_conf *ctx;
	struct ieee80211_vif_chanctx_switch *vifs;
	int n_vifs;
	int next_vif;
};

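/* The change_chanctx callback iterates over interfaces twice: the _cnt_iter
 * pass counts vifs bound to the changing context so that a vifs array can be
 * allocated, and the _fill_iter pass then records each matching vif with
 * old_ctx and new_ctx both pointing at the (in-place modified) context.
 */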
static void
ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct ath10k_mac_change_chanctx_arg *arg = data;

	if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
		return;

	arg->n_vifs++;
}

static void
ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
				    struct ieee80211_vif *vif)
{
	struct ath10k_mac_change_chanctx_arg *arg = data;
	struct ieee80211_chanctx_conf *ctx;

	ctx = rcu_access_pointer(vif->chanctx_conf);
	if (ctx != arg->ctx)
		return;

	if (WARN_ON(arg->next_vif == arg->n_vifs))
		return;

	arg->vifs[arg->next_vif].vif = vif;
	arg->vifs[arg->next_vif].old_ctx = ctx;
	arg->vifs[arg->next_vif].new_ctx = ctx;
	arg->next_vif++;
}

static void
ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
			     struct ieee80211_chanctx_conf *ctx,
			     u32 changed)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };

	mutex_lock(&ar->conf_mutex);

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
		   ctx->def.chan->center_freq, ctx->def.width, ctx, changed);

	/* This shouldn't really happen because channel switching should use
	 * switch_vif_chanctx().
	 */
	if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
		goto unlock;

	if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
		ieee80211_iterate_active_interfaces_atomic(
					hw,
					IEEE80211_IFACE_ITER_NORMAL,
					ath10k_mac_change_chanctx_cnt_iter,
					&arg);
		if (arg.n_vifs == 0)
			goto radar;

		arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
				   GFP_KERNEL);
		if (!arg.vifs)
			goto radar;

		ieee80211_iterate_active_interfaces_atomic(
					hw,
					IEEE80211_IFACE_ITER_NORMAL,
					ath10k_mac_change_chanctx_fill_iter,
					&arg);
		ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
		kfree(arg.vifs);
	}

radar:
	ath10k_recalc_radar_detection(ar);

	/* FIXME: How to configure Rx chains properly? */

	/* No other actions are actually necessary. Firmware maintains channel
	 * definitions per vdev internally and there's no host-side channel
	 * context abstraction to configure, e.g. channel width.
	 */

unlock:
	mutex_unlock(&ar->conf_mutex);
}

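/* Assigning a channel context to a vif starts the corresponding firmware
 * vdev on the context's channel definition and configures powersave. Monitor
 * vdevs are additionally brought up right away, and peer-stats pktlog is
 * enabled when peer stats are in use.
 */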
static int
ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_chanctx_conf *ctx)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_vif *arvif = (void *)vif->drv_priv;
	int ret;

	mutex_lock(&ar->conf_mutex);

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "mac chanctx assign ptr %pK vdev_id %i\n",
		   ctx, arvif->vdev_id);

	if (WARN_ON(arvif->is_started)) {
		mutex_unlock(&ar->conf_mutex);
		return -EBUSY;
	}

	ret = ath10k_vdev_start(arvif, &ctx->def);
	if (ret) {
		ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
			    arvif->vdev_id, vif->addr,
			    ctx->def.chan->center_freq, ret);
		goto err;
	}

	arvif->is_started = true;

	ret = ath10k_mac_vif_setup_ps(arvif);
	if (ret) {
		ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
			    arvif->vdev_id, ret);
		goto err_stop;
	}

	if (vif->type == NL80211_IFTYPE_MONITOR) {
		ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
		if (ret) {
			ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
				    arvif->vdev_id, ret);
			goto err_stop;
		}

		arvif->is_up = true;
	}

	if (ath10k_mac_can_set_cts_prot(arvif)) {
		ret = ath10k_mac_set_cts_prot(arvif);
		if (ret)
			ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
				    arvif->vdev_id, ret);
	}

	if (ath10k_peer_stats_enabled(ar)) {
		ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS;
		ret = ath10k_wmi_pdev_pktlog_enable(ar,
						    ar->pktlog_filter);
		if (ret) {
			ath10k_warn(ar, "failed to enable pktlog %d\n", ret);
			goto err_stop;
		}
	}

	mutex_unlock(&ar->conf_mutex);
	return 0;

err_stop:
	ath10k_vdev_stop(arvif);
	arvif->is_started = false;
	ath10k_mac_vif_setup_ps(arvif);

err:
	mutex_unlock(&ar->conf_mutex);
	return ret;
}

static void
ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_chanctx_conf *ctx)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_vif *arvif = (void *)vif->drv_priv;
	int ret;

	mutex_lock(&ar->conf_mutex);

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "mac chanctx unassign ptr %pK vdev_id %i\n",
		   ctx, arvif->vdev_id);

	WARN_ON(!arvif->is_started);

	if (vif->type == NL80211_IFTYPE_MONITOR) {
		WARN_ON(!arvif->is_up);

		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
		if (ret)
			ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
				    arvif->vdev_id, ret);

		arvif->is_up = false;
	}

	ret = ath10k_vdev_stop(arvif);
	if (ret)
		ath10k_warn(ar, "failed to stop vdev %i: %d\n",
			    arvif->vdev_id, ret);

	arvif->is_started = false;

	mutex_unlock(&ar->conf_mutex);
}

static int
ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
				 struct ieee80211_vif_chanctx_switch *vifs,
				 int n_vifs,
				 enum ieee80211_chanctx_switch_mode mode)
{
	struct ath10k *ar = hw->priv;

	mutex_lock(&ar->conf_mutex);

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "mac chanctx switch n_vifs %d mode %d\n",
		   n_vifs, mode);
	ath10k_mac_update_vif_chan(ar, vifs, n_vifs);

	mutex_unlock(&ar->conf_mutex);
	return 0;
}

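/* Called by mac80211 just before a station is removed under RCU. Flag every
 * firmware peer that references this station as removed so later lookups can
 * skip the stale peer -> sta mapping.
 */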
static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
					     struct ieee80211_vif *vif,
					     struct ieee80211_sta *sta)
{
	struct ath10k *ar;
	struct ath10k_peer *peer;

	ar = hw->priv;

	list_for_each_entry(peer, &ar->peers, list)
		if (peer->sta == sta)
			peer->removed = true;
}

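/* Report driver-maintained per-station statistics (rx duration and the last
 * known tx rate) to cfg80211. Only populated when firmware peer stats are
 * enabled.
 */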
static void ath10k_sta_statistics(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta,
				  struct station_info *sinfo)
{
	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
	struct ath10k *ar = arsta->arvif->ar;

	if (!ath10k_peer_stats_enabled(ar))
		return;

	sinfo->rx_duration = arsta->rx_duration;
	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);

	if (!arsta->txrate.legacy && !arsta->txrate.nss)
		return;

	if (arsta->txrate.legacy) {
		sinfo->txrate.legacy = arsta->txrate.legacy;
	} else {
		sinfo->txrate.mcs = arsta->txrate.mcs;
		sinfo->txrate.nss = arsta->txrate.nss;
		sinfo->txrate.bw = arsta->txrate.bw;
	}
	sinfo->txrate.flags = arsta->txrate.flags;
	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}

static const struct ieee80211_ops ath10k_ops = {
	.tx = ath10k_mac_op_tx,
	.wake_tx_queue = ath10k_mac_op_wake_tx_queue,
	.start = ath10k_start,
	.stop = ath10k_stop,
	.config = ath10k_config,
	.add_interface = ath10k_add_interface,
	.remove_interface = ath10k_remove_interface,
	.configure_filter = ath10k_configure_filter,
	.bss_info_changed = ath10k_bss_info_changed,
	.set_coverage_class = ath10k_mac_op_set_coverage_class,
	.hw_scan = ath10k_hw_scan,
	.cancel_hw_scan = ath10k_cancel_hw_scan,
	.set_key = ath10k_set_key,
	.set_default_unicast_key = ath10k_set_default_unicast_key,
	.sta_state = ath10k_sta_state,
	.conf_tx = ath10k_conf_tx,
	.remain_on_channel = ath10k_remain_on_channel,
	.cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
	.set_rts_threshold = ath10k_set_rts_threshold,
	.set_frag_threshold = ath10k_mac_op_set_frag_threshold,
	.flush = ath10k_flush,
	.tx_last_beacon = ath10k_tx_last_beacon,
	.set_antenna = ath10k_set_antenna,
	.get_antenna = ath10k_get_antenna,
	.reconfig_complete = ath10k_reconfig_complete,
	.get_survey = ath10k_get_survey,
	.set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
	.sta_rc_update = ath10k_sta_rc_update,
	.offset_tsf = ath10k_offset_tsf,
	.ampdu_action = ath10k_ampdu_action,
	.get_et_sset_count = ath10k_debug_get_et_sset_count,
	.get_et_stats = ath10k_debug_get_et_stats,
	.get_et_strings = ath10k_debug_get_et_strings,
	.add_chanctx = ath10k_mac_op_add_chanctx,
	.remove_chanctx = ath10k_mac_op_remove_chanctx,
	.change_chanctx = ath10k_mac_op_change_chanctx,
	.assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
	.unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
	.switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
	.sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove,
	.sta_statistics = ath10k_sta_statistics,

	CFG80211_TESTMODE_CMD(ath10k_tm_cmd)

#ifdef CONFIG_PM
	.suspend = ath10k_wow_op_suspend,
	.resume = ath10k_wow_op_resume,
	.set_wakeup = ath10k_wow_op_set_wakeup,
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
	.sta_add_debugfs = ath10k_sta_add_debugfs,
#endif
};

#define CHAN2G(_channel, _freq, _flags) { \
	.band = NL80211_BAND_2GHZ, \
	.hw_value = (_channel), \
	.center_freq = (_freq), \
	.flags = (_flags), \
	.max_antenna_gain = 0, \
	.max_power = 30, \
}

#define CHAN5G(_channel, _freq, _flags) { \
	.band = NL80211_BAND_5GHZ, \
	.hw_value = (_channel), \
	.center_freq = (_freq), \
	.flags = (_flags), \
	.max_antenna_gain = 0, \
	.max_power = 30, \
}

static const struct ieee80211_channel ath10k_2ghz_channels[] = {
	CHAN2G(1, 2412, 0),
	CHAN2G(2, 2417, 0),
	CHAN2G(3, 2422, 0),
	CHAN2G(4, 2427, 0),
	CHAN2G(5, 2432, 0),
	CHAN2G(6, 2437, 0),
	CHAN2G(7, 2442, 0),
	CHAN2G(8, 2447, 0),
	CHAN2G(9, 2452, 0),
	CHAN2G(10, 2457, 0),
	CHAN2G(11, 2462, 0),
	CHAN2G(12, 2467, 0),
	CHAN2G(13, 2472, 0),
	CHAN2G(14, 2484, 0),
};

static const struct ieee80211_channel ath10k_5ghz_channels[] = {
	CHAN5G(36, 5180, 0),
	CHAN5G(40, 5200, 0),
	CHAN5G(44, 5220, 0),
	CHAN5G(48, 5240, 0),
	CHAN5G(52, 5260, 0),
	CHAN5G(56, 5280, 0),
	CHAN5G(60, 5300, 0),
	CHAN5G(64, 5320, 0),
	CHAN5G(100, 5500, 0),
	CHAN5G(104, 5520, 0),
	CHAN5G(108, 5540, 0),
	CHAN5G(112, 5560, 0),
	CHAN5G(116, 5580, 0),
	CHAN5G(120, 5600, 0),
	CHAN5G(124, 5620, 0),
	CHAN5G(128, 5640, 0),
	CHAN5G(132, 5660, 0),
	CHAN5G(136, 5680, 0),
	CHAN5G(140, 5700, 0),
	CHAN5G(144, 5720, 0),
	CHAN5G(149, 5745, 0),
	CHAN5G(153, 5765, 0),
	CHAN5G(157, 5785, 0),
	CHAN5G(161, 5805, 0),
	CHAN5G(165, 5825, 0),
	CHAN5G(169, 5845, 0),
	CHAN5G(173, 5865, 0),
	/* If you add more, you may need to change ATH10K_MAX_5G_CHAN */
	/* And you will definitely need to change ATH10K_NUM_CHANS in core.h */
};

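/* Allocate the ieee80211_hw along with the ath10k core and the caller's
 * private area (priv_size). The ops table is duplicated per device so that
 * individual callbacks can be patched later, e.g. set_coverage_class is
 * cleared in ath10k_mac_register() for chips that do not support it.
 */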
struct ath10k *ath10k_mac_create(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct ieee80211_ops *ops;
	struct ath10k *ar;

	ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
	if (!ops)
		return NULL;

	hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
	if (!hw) {
		kfree(ops);
		return NULL;
	}

	ar = hw->priv;
	ar->hw = hw;
	ar->ops = ops;

	return ar;
}

void ath10k_mac_destroy(struct ath10k *ar)
{
	struct ieee80211_ops *ops = ar->ops;

	ieee80211_free_hw(ar->hw);
	kfree(ops);
}

static const struct ieee80211_iface_limit ath10k_if_limits[] = {
	{
		.max = 8,
		.types = BIT(NL80211_IFTYPE_STATION)
			| BIT(NL80211_IFTYPE_P2P_CLIENT)
	},
	{
		.max = 3,
		.types = BIT(NL80211_IFTYPE_P2P_GO)
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE)
	},
	{
		.max = 7,
		.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
			| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
	},
};

static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
	{
		.max = 8,
		.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
			| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION)
	},
};

static const struct ieee80211_iface_combination ath10k_if_comb[] = {
	{
		.limits = ath10k_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_if_limits),
		.max_interfaces = 8,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
	},
};

static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
	{
		.limits = ath10k_10x_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
		.max_interfaces = 8,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
		.beacon_int_min_gcd = 1,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
				       BIT(NL80211_CHAN_WIDTH_20) |
				       BIT(NL80211_CHAN_WIDTH_40) |
				       BIT(NL80211_CHAN_WIDTH_80),
#endif
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
			 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
			 BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
			 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
			 BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_ADHOC),
	},
};

/* FIXME: This is not thoroughly tested. These combinations may over- or
 * underestimate hw/fw capabilities.
 */
static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
	{
		.limits = ath10k_tlv_if_limit,
		.num_different_channels = 1,
		.max_interfaces = 4,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
	},
	{
		.limits = ath10k_tlv_if_limit_ibss,
		.num_different_channels = 1,
		.max_interfaces = 2,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
	},
};

static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
	{
		.limits = ath10k_tlv_if_limit,
		.num_different_channels = 1,
		.max_interfaces = 4,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
	},
	{
		.limits = ath10k_tlv_qcs_if_limit,
		.num_different_channels = 2,
		.max_interfaces = 4,
		.n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
	},
	{
		.limits = ath10k_tlv_if_limit_ibss,
		.num_different_channels = 1,
		.max_interfaces = 2,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
	},
};

static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 16,
		.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
			| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
	},
};

static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
	{
		.limits = ath10k_10_4_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
		.max_interfaces = 16,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
		.beacon_int_min_gcd = 1,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
				       BIT(NL80211_CHAN_WIDTH_20) |
				       BIT(NL80211_CHAN_WIDTH_40) |
				       BIT(NL80211_CHAN_WIDTH_80),
#endif
	},
};

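/* ath10k_get_arvif() resolves a firmware vdev_id back to its ath10k_vif by
 * iterating over all active mac80211 interfaces; it returns NULL (with a
 * warning) if no interface is currently bound to that vdev_id.
 */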
static void ath10k_get_arvif_iter(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct ath10k_vif_iter *arvif_iter = data;
	struct ath10k_vif *arvif = (void *)vif->drv_priv;

	if (arvif->vdev_id == arvif_iter->vdev_id)
		arvif_iter->arvif = arvif;
}

struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif_iter arvif_iter;
	u32 flags;

	memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
	arvif_iter.vdev_id = vdev_id;

	flags = IEEE80211_IFACE_ITER_RESUME_ALL;
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   flags,
						   ath10k_get_arvif_iter,
						   &arvif_iter);
	if (!arvif_iter.arvif) {
		ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
		return NULL;
	}

	return arvif_iter.arvif;
}

#define WRD_METHOD "WRDD"
#define WRDD_WIFI (0x07)

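/* The ACPI WRDD method returns a package of (domain type, country code)
 * pairs. Walk the package and return the country code for the Wi-Fi domain
 * (WRDD_WIFI), or 0 if none is found or the structure is malformed.
 */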
static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
{
	union acpi_object *mcc_pkg;
	union acpi_object *domain_type;
	union acpi_object *mcc_value;
	u32 i;

	if (wrdd->type != ACPI_TYPE_PACKAGE ||
	    wrdd->package.count < 2 ||
	    wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
	    wrdd->package.elements[0].integer.value != 0) {
		ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n");
		return 0;
	}

	for (i = 1; i < wrdd->package.count; ++i) {
		mcc_pkg = &wrdd->package.elements[i];

		if (mcc_pkg->type != ACPI_TYPE_PACKAGE)
			continue;
		if (mcc_pkg->package.count < 2)
			continue;
		if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
		    mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
			continue;

		domain_type = &mcc_pkg->package.elements[0];
		if (domain_type->integer.value != WRDD_WIFI)
			continue;

		mcc_value = &mcc_pkg->package.elements[1];
		return mcc_value->integer.value;
	}
	return 0;
}

static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
{
	struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev);
	acpi_handle root_handle;
	acpi_handle handle;
	struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;
	u32 alpha2_code;
	char alpha2[3];

	root_handle = ACPI_HANDLE(&pdev->dev);
	if (!root_handle)
		return -EOPNOTSUPP;

	status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
	if (ACPI_FAILURE(status)) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "failed to get wrd method %d\n", status);
		return -EIO;
	}

	status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
	if (ACPI_FAILURE(status)) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "failed to call wrdd %d\n", status);
		return -EIO;
	}

	alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
	kfree(wrdd.pointer);
	if (!alpha2_code)
		return -EIO;

	alpha2[0] = (alpha2_code >> 8) & 0xff;
	alpha2[1] = (alpha2_code >> 0) & 0xff;
	alpha2[2] = '\0';

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);

	*rd = ath_regd_find_country_by_name(alpha2);
	if (*rd == 0xffff)
		return -EIO;

	*rd |= COUNTRY_ERD_FLAG;
	return 0;
}

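/* Derive the initial regulatory domain: prefer the country reported by the
 * platform's ACPI WRDD method and fall back to the EEPROM-programmed
 * regulatory domain when WRDD is absent or unusable.
 */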
static int ath10k_mac_init_rd(struct ath10k *ar)
{
	int ret;
	u16 rd;

	ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
	if (ret) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "fallback to eeprom programmed regulatory settings\n");
		rd = ar->hw_eeprom_rd;
	}

	ar->ath_common.regulatory.current_rd = rd;
	return 0;
}

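/* Populate wiphy and ieee80211_hw capabilities from firmware/hardware
 * parameters (bands, interface combinations, cipher suites, feature flags)
 * and register the device with mac80211.
 */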
int ath10k_mac_register(struct ath10k *ar)
{
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,

		/* Do not add hardware supported ciphers before this line.
		 * Allow software encryption for all chips. Don't forget to
		 * update n_cipher_suites below.
		 */
		WLAN_CIPHER_SUITE_AES_CMAC,
		WLAN_CIPHER_SUITE_BIP_CMAC_256,
		WLAN_CIPHER_SUITE_BIP_GMAC_128,
		WLAN_CIPHER_SUITE_BIP_GMAC_256,

		/* Only QCA99x0 and QCA4019 variants support GCMP-128,
		 * GCMP-256 and CCMP-256 in hardware.
		 */
		WLAN_CIPHER_SUITE_GCMP,
		WLAN_CIPHER_SUITE_GCMP_256,
		WLAN_CIPHER_SUITE_CCMP_256,
	};
	struct ieee80211_supported_band *band;
	void *channels;
	int ret;

	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);

	SET_IEEE80211_DEV(ar->hw, ar->dev);

	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
		     ATH10K_NUM_CHANS);

	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
		channels = kmemdup(ath10k_2ghz_channels,
				   sizeof(ath10k_2ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
		band->channels = channels;

		if (ar->hw_params.cck_rate_map_rev2) {
			band->n_bitrates = ath10k_g_rates_rev2_size;
			band->bitrates = ath10k_g_rates_rev2;
		} else {
			band->n_bitrates = ath10k_g_rates_size;
			band->bitrates = ath10k_g_rates;
		}

		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
	}

	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
		channels = kmemdup(ath10k_5ghz_channels,
				   sizeof(ath10k_5ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
		band->channels = channels;
		band->n_bitrates = ath10k_a_rates_size;
		band->bitrates = ath10k_a_rates;
		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
	}

	ath10k_mac_setup_ht_vht_cap(ar);

	ar->hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;

	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
		ar->hw->wiphy->interface_modes |=
			BIT(NL80211_IFTYPE_P2P_DEVICE) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO);

	ieee80211_hw_set(ar->hw, SIGNAL_DBM);

	if (!test_bit(ATH10K_FW_FEATURE_NO_PS,
		      ar->running_fw->fw_file.fw_features)) {
		ieee80211_hw_set(ar->hw, SUPPORTS_PS);
		ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
	}

	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(ar->hw, AP_LINK_PS);
	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
	ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);

	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;

	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
	}

	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;

	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
	ar->hw->txq_data_size = sizeof(struct ath10k_txq);

	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;

	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;

		/* Firmware delivers WPS/P2P Probe Request frames to the
		 * driver so that userspace (e.g. wpa_supplicant/hostapd) can
		 * generate correct Probe Responses. Advertising this offload
		 * is somewhat of a hack.
		 */
		ar->hw->wiphy->probe_resp_offload |=
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
	}

	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
	    test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
		if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
			ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
	}

	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
		ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA);

	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
	ar->hw->wiphy->max_remain_on_channel_duration = 5000;

	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
				   NL80211_FEATURE_AP_SCAN;

	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;

	ret = ath10k_wow_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to init wow: %d\n", ret);
		goto err_free;
	}

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
	wiphy_ext_feature_set(ar->hw->wiphy,
			      NL80211_EXT_FEATURE_SET_SCAN_DWELL);

	/* On LL hardware the queues are managed entirely by the firmware, so
	 * just advertise the maximum number of queues to mac80211.
	 */
	ar->hw->queues = IEEE80211_MAX_QUEUES;

	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
	 * something that vdev_ids can't reach so that we don't stop the queue
	 * accidentally.
	 */
	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;

	switch (ar->running_fw->fw_file.wmi_op_version) {
	case ATH10K_FW_WMI_OP_VERSION_MAIN:
		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_if_comb);
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_TLV:
		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
			ar->hw->wiphy->iface_combinations =
				ath10k_tlv_qcs_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
		} else {
			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_if_comb);
		}
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_1:
	case ATH10K_FW_WMI_OP_VERSION_10_2:
	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10x_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_4:
		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10_4_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_UNSET:
	case ATH10K_FW_WMI_OP_VERSION_MAX:
		WARN_ON(1);
		ret = -EINVAL;
		goto err_free;
	}

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ar->hw->netdev_features = NETIF_F_HW_CSUM;

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
		/* Init ath dfs pattern detector */
		ar->ath_common.debug_mask = ATH_DBG_DFS;
		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
							     NL80211_DFS_UNSET);

		if (!ar->dfs_detector)
			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
	}

	ret = ath10k_mac_init_rd(ar);
	if (ret) {
		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	/* Disable set_coverage_class for chipsets that do not support it. */
	if (!ar->hw_params.hw_ops->set_coverage_class)
		ar->ops->set_coverage_class = NULL;

	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
			    ath10k_reg_notifier);
	if (ret) {
		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
		goto err_dfs_detector_exit;
	}

	if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
		ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
		if (ret) {
			ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
			goto err_dfs_detector_exit;
		}

		ar->hw->wiphy->features |=
			NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
	}

	ar->hw->wiphy->cipher_suites = cipher_suites;

	/* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128
	 * and GCMP-256 ciphers in hardware. Fetch number of ciphers supported
	 * from chip specific hw_param table.
	 */
	if (!ar->hw_params.n_cipher_suites ||
	    ar->hw_params.n_cipher_suites > ARRAY_SIZE(cipher_suites)) {
		ath10k_err(ar, "invalid hw_params.n_cipher_suites %d\n",
			   ar->hw_params.n_cipher_suites);
		ar->hw_params.n_cipher_suites = 8;
	}
	ar->hw->wiphy->n_cipher_suites = ar->hw_params.n_cipher_suites;

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ret = ieee80211_register_hw(ar->hw);
	if (ret) {
		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
		ret = regulatory_hint(ar->hw->wiphy,
				      ar->ath_common.regulatory.alpha2);
		if (ret)
			goto err_unregister;
	}

	return 0;

err_unregister:
	ieee80211_unregister_hw(ar->hw);

err_dfs_detector_exit:
	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

err_free:
	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
	return ret;
}

void ath10k_mac_unregister(struct ath10k *ar)
{
	ieee80211_unregister_hw(ar->hw);

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
}