1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of version 2 of the GNU General Public License as
15 * published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
25 * USA
26 *
27 * The full GNU General Public License is included in this distribution
28 * in the file called COPYING.
29 *
30 * Contact Information:
31 * Intel Linux Wireless <linuxwifi@intel.com>
32 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
33 *
34 * BSD LICENSE
35 *
36 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
37 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
38 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
39 * Copyright(c) 2018 Intel Corporation
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 *
46 * * Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * * Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in
50 * the documentation and/or other materials provided with the
51 * distribution.
52 * * Neither the name Intel Corporation nor the names of its
53 * contributors may be used to endorse or promote products derived
54 * from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
57 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
58 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
59 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
60 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
62 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
63 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
64 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
65 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
66 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67 *
68 *****************************************************************************/
69 #include <linux/kernel.h>
70 #include <linux/slab.h>
71 #include <linux/skbuff.h>
72 #include <linux/netdevice.h>
73 #include <linux/etherdevice.h>
74 #include <linux/ip.h>
75 #include <linux/if_arp.h>
76 #include <linux/time.h>
77 #include <net/mac80211.h>
78 #include <net/ieee80211_radiotap.h>
79 #include <net/tcp.h>
80
81 #include "iwl-op-mode.h"
82 #include "iwl-io.h"
83 #include "mvm.h"
84 #include "sta.h"
85 #include "time-event.h"
86 #include "iwl-eeprom-parse.h"
87 #include "iwl-phy-db.h"
88 #include "testmode.h"
89 #include "fw/error-dump.h"
90 #include "iwl-prph.h"
91 #include "iwl-nvm-parse.h"
92
93 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
94 {
95 .max = 1,
96 .types = BIT(NL80211_IFTYPE_STATION),
97 },
98 {
99 .max = 1,
100 .types = BIT(NL80211_IFTYPE_AP) |
101 BIT(NL80211_IFTYPE_P2P_CLIENT) |
102 BIT(NL80211_IFTYPE_P2P_GO),
103 },
104 {
105 .max = 1,
106 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
107 },
108 };
109
110 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
111 {
112 .num_different_channels = 2,
113 .max_interfaces = 3,
114 .limits = iwl_mvm_limits,
115 .n_limits = ARRAY_SIZE(iwl_mvm_limits),
116 },
117 };
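/*
 * Editor's note (summary of the tables above): a single combination is
 * advertised to mac80211 -- at most one station interface, one
 * AP/P2P-client/P2P-GO interface and one P2P_DEVICE interface, i.e. up
 * to 3 interfaces total on up to 2 different channels.
 */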
118
119 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
120 /*
121 * Use the reserved field to indicate magic values.
122 * These values will only be used internally by the driver,
123 * and won't make it to the fw (reserved will be 0).
124 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
125 * be the vif's IP address. If the vif does not have exactly one
126 * IP address (0, or more than 1), this attribute will
127 * be skipped.
128 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
129 * the LSB bytes of the vif's MAC address.
130 */
131 enum {
132 BC_FILTER_MAGIC_NONE = 0,
133 BC_FILTER_MAGIC_IP,
134 BC_FILTER_MAGIC_MAC,
135 };
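/*
 * Editor's note: reserved1 is only a driver-internal marker.  Before the
 * command reaches the firmware, iwl_mvm_set_bcast_filter() (later in this
 * file) resolves the marked attribute and clears the field, roughly:
 *
 *	case cpu_to_le16(BC_FILTER_MAGIC_IP):
 *		attr->val = vif->bss_conf.arp_addr_list[0];
 *		break;
 *	...
 *	attr->reserved1 = 0;
 */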
136
137 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
138 {
139 /* arp */
140 .discard = 0,
141 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
142 .attrs = {
143 {
144 /* frame type - arp, hw type - ethernet */
145 .offset_type =
146 BCAST_FILTER_OFFSET_PAYLOAD_START,
147 .offset = sizeof(rfc1042_header),
148 .val = cpu_to_be32(0x08060001),
149 .mask = cpu_to_be32(0xffffffff),
150 },
151 {
152 /* arp dest ip */
153 .offset_type =
154 BCAST_FILTER_OFFSET_PAYLOAD_START,
155 .offset = sizeof(rfc1042_header) + 2 +
156 sizeof(struct arphdr) +
157 ETH_ALEN + sizeof(__be32) +
158 ETH_ALEN,
159 .mask = cpu_to_be32(0xffffffff),
160 /* mark it as special field */
161 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
162 },
163 },
164 },
165 {
166 /* dhcp offer bcast */
167 .discard = 0,
168 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
169 .attrs = {
170 {
171 /* udp dest port - 68 (bootp client)*/
172 .offset_type = BCAST_FILTER_OFFSET_IP_END,
173 .offset = offsetof(struct udphdr, dest),
174 .val = cpu_to_be32(0x00440000),
175 .mask = cpu_to_be32(0xffff0000),
176 },
177 {
178 /* dhcp - lsb bytes of client hw address */
179 .offset_type = BCAST_FILTER_OFFSET_IP_END,
180 .offset = 38,
181 .mask = cpu_to_be32(0xffffffff),
182 /* mark it as special field */
183 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
184 },
185 },
186 },
187 /* last filter must be empty */
188 {},
189 };
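/*
 * Editor's summary of the defaults above: pass ARP frames whose target IP
 * matches the vif's (single) IP address, and DHCP/BOOTP frames to UDP port
 * 68 whose client hardware address matches the vif's MAC; other broadcast
 * frames are dropped via the per-MAC default_discard flag set up in
 * iwl_mvm_bcast_filter_iterator().
 */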
190 #endif
191
192 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
193 {
194 if (!iwl_mvm_is_d0i3_supported(mvm))
195 return;
196
197 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
198 spin_lock_bh(&mvm->refs_lock);
199 mvm->refs[ref_type]++;
200 spin_unlock_bh(&mvm->refs_lock);
201 iwl_trans_ref(mvm->trans);
202 }
203
204 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
205 {
206 if (!iwl_mvm_is_d0i3_supported(mvm))
207 return;
208
209 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
210 spin_lock_bh(&mvm->refs_lock);
211 if (WARN_ON(!mvm->refs[ref_type])) {
212 spin_unlock_bh(&mvm->refs_lock);
213 return;
214 }
215 mvm->refs[ref_type]--;
216 spin_unlock_bh(&mvm->refs_lock);
217 iwl_trans_unref(mvm->trans);
218 }
219
220 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
221 enum iwl_mvm_ref_type except_ref)
222 {
223 int i, j;
224
225 if (!iwl_mvm_is_d0i3_supported(mvm))
226 return;
227
228 spin_lock_bh(&mvm->refs_lock);
229 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
230 if (except_ref == i || !mvm->refs[i])
231 continue;
232
233 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
234 i, mvm->refs[i]);
235 for (j = 0; j < mvm->refs[i]; j++)
236 iwl_trans_unref(mvm->trans);
237 mvm->refs[i] = 0;
238 }
239 spin_unlock_bh(&mvm->refs_lock);
240 }
241
242 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
243 {
244 int i;
245 bool taken = false;
246
247 if (!iwl_mvm_is_d0i3_supported(mvm))
248 return true;
249
250 spin_lock_bh(&mvm->refs_lock);
251 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
252 if (mvm->refs[i]) {
253 taken = true;
254 break;
255 }
256 }
257 spin_unlock_bh(&mvm->refs_lock);
258
259 return taken;
260 }
261
262 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
263 {
264 iwl_mvm_ref(mvm, ref_type);
265
266 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
267 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
268 HZ)) {
269 WARN_ON_ONCE(1);
270 iwl_mvm_unref(mvm, ref_type);
271 return -EIO;
272 }
273
274 return 0;
275 }
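/*
 * Usage sketch (editor's addition): callers bracket work that must keep
 * the device out of D0i3 with a reference, e.g.
 *
 *	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
 *	if (ret)
 *		return ret;
 *	... work that needs the device awake ...
 *	iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
 *
 * iwl_mvm_ref_sync() additionally waits up to one second (HZ jiffies) for
 * a pending D0i3 exit to complete before returning.
 */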
276
277 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
278 {
279 int i;
280
281 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
282 for (i = 0; i < NUM_PHY_CTX; i++) {
283 mvm->phy_ctxts[i].id = i;
284 mvm->phy_ctxts[i].ref = 0;
285 }
286 }
287
288 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
289 const char *alpha2,
290 enum iwl_mcc_source src_id,
291 bool *changed)
292 {
293 struct ieee80211_regdomain *regd = NULL;
294 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
295 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
296 struct iwl_mcc_update_resp *resp;
297
298 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
299
300 lockdep_assert_held(&mvm->mutex);
301
302 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
303 if (IS_ERR_OR_NULL(resp)) {
304 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
305 PTR_ERR_OR_ZERO(resp));
306 goto out;
307 }
308
309 if (changed)
310 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
311
312 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
313 __le32_to_cpu(resp->n_channels),
314 resp->channels,
315 __le16_to_cpu(resp->mcc),
316 __le16_to_cpu(resp->geo_info));
317 /* Store the return source id */
318 src_id = resp->source_id;
319 kfree(resp);
320 if (IS_ERR_OR_NULL(regd)) {
321 IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
322 PTR_ERR_OR_ZERO(regd));
323 goto out;
324 }
325
326 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
327 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
328 mvm->lar_regdom_set = true;
329 mvm->mcc_src = src_id;
330
331 out:
332 return regd;
333 }
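/*
 * Editor's note: the regulatory flow is MCC update response from the
 * firmware -> iwl_parse_nvm_mcc_info() -> ieee80211_regdomain, with
 * mvm->mcc_src and mvm->lar_regdom_set recording where the last MCC came
 * from; callers such as iwl_mvm_update_changed_regdom() below decide
 * whether to push the result to cfg80211.
 */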
334
335 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
336 {
337 bool changed;
338 struct ieee80211_regdomain *regd;
339
340 if (!iwl_mvm_is_lar_supported(mvm))
341 return;
342
343 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
344 if (!IS_ERR_OR_NULL(regd)) {
345 /* only update the regulatory core if changed */
346 if (changed)
347 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
348
349 kfree(regd);
350 }
351 }
352
353 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
354 bool *changed)
355 {
356 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
357 iwl_mvm_is_wifi_mcc_supported(mvm) ?
358 MCC_SOURCE_GET_CURRENT :
359 MCC_SOURCE_OLD_FW, changed);
360 }
361
362 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
363 {
364 enum iwl_mcc_source used_src;
365 struct ieee80211_regdomain *regd;
366 int ret;
367 bool changed;
368 const struct ieee80211_regdomain *r =
369 rtnl_dereference(mvm->hw->wiphy->regd);
370
371 if (!r)
372 return -ENOENT;
373
374 /* save the last source in case we overwrite it below */
375 used_src = mvm->mcc_src;
376 if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
377 /* Notify the firmware we support wifi location updates */
378 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
379 if (!IS_ERR_OR_NULL(regd))
380 kfree(regd);
381 }
382
383 /* Now set our last stored MCC and source */
384 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
385 &changed);
386 if (IS_ERR_OR_NULL(regd))
387 return -EIO;
388
389 /* update cfg80211 if the regdomain was changed */
390 if (changed)
391 ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
392 else
393 ret = 0;
394
395 kfree(regd);
396 return ret;
397 }
398
399 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
400 {
401 struct ieee80211_hw *hw = mvm->hw;
402 int num_mac, ret, i;
403 static const u32 mvm_ciphers[] = {
404 WLAN_CIPHER_SUITE_WEP40,
405 WLAN_CIPHER_SUITE_WEP104,
406 WLAN_CIPHER_SUITE_TKIP,
407 WLAN_CIPHER_SUITE_CCMP,
408 };
409
410 /* Tell mac80211 our characteristics */
411 ieee80211_hw_set(hw, SIGNAL_DBM);
412 ieee80211_hw_set(hw, SPECTRUM_MGMT);
413 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
414 ieee80211_hw_set(hw, QUEUE_CONTROL);
415 ieee80211_hw_set(hw, WANT_MONITOR_VIF);
416 ieee80211_hw_set(hw, SUPPORTS_PS);
417 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
418 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
419 ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
420 ieee80211_hw_set(hw, CONNECTION_MONITOR);
421 ieee80211_hw_set(hw, CHANCTX_STA_CSA);
422 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
423 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
424 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
425 ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
426 ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
427
428 if (iwl_mvm_has_tlc_offload(mvm)) {
429 ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
430 ieee80211_hw_set(hw, HAS_RATE_CONTROL);
431 }
432
433 if (iwl_mvm_has_new_rx_api(mvm))
434 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
435
436 if (fw_has_capa(&mvm->fw->ucode_capa,
437 IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) {
438 ieee80211_hw_set(hw, AP_LINK_PS);
439 } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
440 /*
441 * we absolutely need this for the new TX API since that comes
442 * with many more queues than the current code can deal with
443 * for station powersave
444 */
445 return -EINVAL;
446 }
447
448 if (mvm->trans->num_rx_queues > 1)
449 ieee80211_hw_set(hw, USES_RSS);
450
451 if (mvm->trans->max_skb_frags)
452 hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
453
454 hw->queues = IEEE80211_MAX_QUEUES;
455 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
456 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
457 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
458 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
459 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
460
461 hw->radiotap_timestamp.units_pos =
462 IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
463 IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
464 /* this is the case for CCK frames; it's better (only 8) for OFDM */
465 hw->radiotap_timestamp.accuracy = 22;
466
467 if (!iwl_mvm_has_tlc_offload(mvm))
468 hw->rate_control_algorithm = RS_NAME;
469
470 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
471 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
472
473 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
474 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
475 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
476 hw->wiphy->cipher_suites = mvm->ciphers;
477
478 if (iwl_mvm_has_new_rx_api(mvm)) {
479 mvm->ciphers[hw->wiphy->n_cipher_suites] =
480 WLAN_CIPHER_SUITE_GCMP;
481 hw->wiphy->n_cipher_suites++;
482 mvm->ciphers[hw->wiphy->n_cipher_suites] =
483 WLAN_CIPHER_SUITE_GCMP_256;
484 hw->wiphy->n_cipher_suites++;
485 }
486
487 /* Enable 11w if software crypto is not enabled (as the
488 * firmware will interpret some mgmt packets, so enabling it
489 * with software crypto isn't safe).
490 */
491 if (!iwlwifi_mod_params.swcrypto) {
492 ieee80211_hw_set(hw, MFP_CAPABLE);
493 mvm->ciphers[hw->wiphy->n_cipher_suites] =
494 WLAN_CIPHER_SUITE_AES_CMAC;
495 hw->wiphy->n_cipher_suites++;
496 if (iwl_mvm_has_new_rx_api(mvm)) {
497 mvm->ciphers[hw->wiphy->n_cipher_suites] =
498 WLAN_CIPHER_SUITE_BIP_GMAC_128;
499 hw->wiphy->n_cipher_suites++;
500 mvm->ciphers[hw->wiphy->n_cipher_suites] =
501 WLAN_CIPHER_SUITE_BIP_GMAC_256;
502 hw->wiphy->n_cipher_suites++;
503 }
504 }
505
506 /* currently FW API supports only one optional cipher scheme */
507 if (mvm->fw->cs[0].cipher) {
508 const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
509 struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
510
511 mvm->hw->n_cipher_schemes = 1;
512
513 cs->cipher = le32_to_cpu(fwcs->cipher);
514 cs->iftype = BIT(NL80211_IFTYPE_STATION);
515 cs->hdr_len = fwcs->hdr_len;
516 cs->pn_len = fwcs->pn_len;
517 cs->pn_off = fwcs->pn_off;
518 cs->key_idx_off = fwcs->key_idx_off;
519 cs->key_idx_mask = fwcs->key_idx_mask;
520 cs->key_idx_shift = fwcs->key_idx_shift;
521 cs->mic_len = fwcs->mic_len;
522
523 mvm->hw->cipher_schemes = mvm->cs;
524 mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
525 hw->wiphy->n_cipher_suites++;
526 }
527
528 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
529 hw->wiphy->features |=
530 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
531 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
532 NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
533
534 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
535 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
536 hw->chanctx_data_size = sizeof(u16);
537
538 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
539 BIT(NL80211_IFTYPE_P2P_CLIENT) |
540 BIT(NL80211_IFTYPE_AP) |
541 BIT(NL80211_IFTYPE_P2P_GO) |
542 BIT(NL80211_IFTYPE_P2P_DEVICE) |
543 BIT(NL80211_IFTYPE_ADHOC);
544
545 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
546 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
547 if (iwl_mvm_is_lar_supported(mvm))
548 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
549 else
550 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
551 REGULATORY_DISABLE_BEACON_HINTS;
552
553 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
554 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
555
556 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
557 hw->wiphy->n_iface_combinations =
558 ARRAY_SIZE(iwl_mvm_iface_combinations);
559
560 hw->wiphy->max_remain_on_channel_duration = 10000;
561 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
562 /* we can compensate an offset of up to 3 channels = 15 MHz */
563 hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
564
565 /* Extract MAC address */
566 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
567 hw->wiphy->addresses = mvm->addresses;
568 hw->wiphy->n_addresses = 1;
569
570 /* Extract additional MAC addresses if available */
571 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
572 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
573
574 for (i = 1; i < num_mac; i++) {
575 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
576 ETH_ALEN);
577 mvm->addresses[i].addr[5]++;
578 hw->wiphy->n_addresses++;
579 }
580
581 iwl_mvm_reset_phy_ctxts(mvm);
582
583 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
584
585 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
586
587 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
588 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
589 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
590
591 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
592 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
593 else
594 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
595
596 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
597 hw->wiphy->bands[NL80211_BAND_2GHZ] =
598 &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
599 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
600 hw->wiphy->bands[NL80211_BAND_5GHZ] =
601 &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
602
603 if (fw_has_capa(&mvm->fw->ucode_capa,
604 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
605 fw_has_api(&mvm->fw->ucode_capa,
606 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
607 hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
608 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
609 }
610
611 hw->wiphy->hw_version = mvm->trans->hw_id;
612
613 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
614 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
615 else
616 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
617
618 hw->wiphy->max_sched_scan_reqs = 1;
619 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
620 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
621 /* we create the 802.11 header and zero length SSID IE. */
622 hw->wiphy->max_sched_scan_ie_len =
623 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
624 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
625 hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
626
627 /*
628 * the firmware uses a u8 for the number of iterations, but 0xff is
629 * reserved for an infinite loop, so the maximum is actually 254 iterations.
630 */
631 hw->wiphy->max_sched_scan_plan_iterations = 254;
632
633 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
634 NL80211_FEATURE_LOW_PRIORITY_SCAN |
635 NL80211_FEATURE_P2P_GO_OPPPS |
636 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
637 NL80211_FEATURE_DYNAMIC_SMPS |
638 NL80211_FEATURE_STATIC_SMPS |
639 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
640
641 if (fw_has_capa(&mvm->fw->ucode_capa,
642 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
643 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
644 if (fw_has_capa(&mvm->fw->ucode_capa,
645 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
646 hw->wiphy->features |= NL80211_FEATURE_QUIET;
647
648 if (fw_has_capa(&mvm->fw->ucode_capa,
649 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
650 hw->wiphy->features |=
651 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
652
653 if (fw_has_capa(&mvm->fw->ucode_capa,
654 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
655 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
656
657 if (fw_has_api(&mvm->fw->ucode_capa,
658 IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
659 wiphy_ext_feature_set(hw->wiphy,
660 NL80211_EXT_FEATURE_SCAN_START_TIME);
661 wiphy_ext_feature_set(hw->wiphy,
662 NL80211_EXT_FEATURE_BSS_PARENT_TSF);
663 wiphy_ext_feature_set(hw->wiphy,
664 NL80211_EXT_FEATURE_SET_SCAN_DWELL);
665 }
666
667 if (iwl_mvm_is_oce_supported(mvm)) {
668 wiphy_ext_feature_set(hw->wiphy,
669 NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
670 wiphy_ext_feature_set(hw->wiphy,
671 NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
672 wiphy_ext_feature_set(hw->wiphy,
673 NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
674 wiphy_ext_feature_set(hw->wiphy,
675 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
676 }
677
678 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
679
680 #ifdef CONFIG_PM_SLEEP
681 if (iwl_mvm_is_d0i3_supported(mvm) &&
682 device_can_wakeup(mvm->trans->dev)) {
683 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
684 hw->wiphy->wowlan = &mvm->wowlan;
685 }
686
687 if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec &&
688 mvm->trans->ops->d3_suspend &&
689 mvm->trans->ops->d3_resume &&
690 device_can_wakeup(mvm->trans->dev)) {
691 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
692 WIPHY_WOWLAN_DISCONNECT |
693 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
694 WIPHY_WOWLAN_RFKILL_RELEASE |
695 WIPHY_WOWLAN_NET_DETECT;
696 if (!iwlwifi_mod_params.swcrypto)
697 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
698 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
699 WIPHY_WOWLAN_4WAY_HANDSHAKE;
700
701 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
702 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
703 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
704 mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
705 hw->wiphy->wowlan = &mvm->wowlan;
706 }
707 #endif
708
709 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
710 /* assign default bcast filtering configuration */
711 mvm->bcast_filters = iwl_mvm_default_bcast_filters;
712 #endif
713
714 ret = iwl_mvm_leds_init(mvm);
715 if (ret)
716 return ret;
717
718 if (fw_has_capa(&mvm->fw->ucode_capa,
719 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
720 IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
721 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
722 ieee80211_hw_set(hw, TDLS_WIDER_BW);
723 }
724
725 if (fw_has_capa(&mvm->fw->ucode_capa,
726 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
727 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
728 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
729 }
730
731 hw->netdev_features |= mvm->cfg->features;
732 if (!iwl_mvm_is_csum_supported(mvm)) {
733 hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
734 NETIF_F_RXCSUM);
735 /* We may support SW TX CSUM */
736 if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
737 hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
738 }
739
740 ret = ieee80211_register_hw(mvm->hw);
741 if (ret)
742 iwl_mvm_leds_exit(mvm);
743 mvm->init_status |= IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
744
745 if (mvm->cfg->vht_mu_mimo_supported)
746 wiphy_ext_feature_set(hw->wiphy,
747 NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
748
749 return ret;
750 }
751
752 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
753 struct ieee80211_sta *sta,
754 struct sk_buff *skb)
755 {
756 struct iwl_mvm_sta *mvmsta;
757 bool defer = false;
758
759 /*
760 * double check the IN_D0I3 flag both before and after
761 * taking the spinlock, in order to prevent taking
762 * the spinlock when not needed.
763 */
764 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
765 return false;
766
767 spin_lock(&mvm->d0i3_tx_lock);
768 /*
769 * testing the flag again ensures the skb dequeue
770 * loop (on d0i3 exit) hasn't run yet.
771 */
772 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
773 goto out;
774
775 mvmsta = iwl_mvm_sta_from_mac80211(sta);
776 if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
777 mvmsta->sta_id != mvm->d0i3_ap_sta_id)
778 goto out;
779
780 __skb_queue_tail(&mvm->d0i3_tx, skb);
781 ieee80211_stop_queues(mvm->hw);
782
783 /* trigger wakeup */
784 iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
785 iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
786
787 defer = true;
788 out:
789 spin_unlock(&mvm->d0i3_tx_lock);
790 return defer;
791 }
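/*
 * Flow sketch (editor's addition, hedged): while the device is in D0i3,
 * frames for the AP station are parked on mvm->d0i3_tx and the mac80211
 * queues are stopped; the paired ref/unref of IWL_MVM_REF_TX above only
 * kicks the D0i3 exit machinery, which is expected to flush mvm->d0i3_tx
 * and wake the queues once the exit completes.
 */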
792
793 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
794 struct ieee80211_tx_control *control,
795 struct sk_buff *skb)
796 {
797 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
798 struct ieee80211_sta *sta = control->sta;
799 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
800 struct ieee80211_hdr *hdr = (void *)skb->data;
801
802 if (iwl_mvm_is_radio_killed(mvm)) {
803 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
804 goto drop;
805 }
806
807 if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
808 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
809 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
810 goto drop;
811
812 /* treat non-bufferable MMPDUs on AP interfaces as broadcast */
813 if ((info->control.vif->type == NL80211_IFTYPE_AP ||
814 info->control.vif->type == NL80211_IFTYPE_ADHOC) &&
815 ieee80211_is_mgmt(hdr->frame_control) &&
816 !ieee80211_is_bufferable_mmpdu(hdr->frame_control))
817 sta = NULL;
818
819 if (sta) {
820 if (iwl_mvm_defer_tx(mvm, sta, skb))
821 return;
822 if (iwl_mvm_tx_skb(mvm, skb, sta))
823 goto drop;
824 return;
825 }
826
827 if (iwl_mvm_tx_skb_non_sta(mvm, skb))
828 goto drop;
829 return;
830 drop:
831 ieee80211_free_txskb(hw, skb);
832 }
833
834 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
835 {
836 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
837 return false;
838 return true;
839 }
840
841 static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
842 {
843 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
844 return false;
845 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
846 return true;
847
848 /* enabled by default */
849 return true;
850 }
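/*
 * Editor's note: both helpers honour the iwlwifi "disable_11n" module
 * parameter as a bitmask, so e.g. loading the module with the
 * IWL_DISABLE_HT_TXAGG bit set disables TX aggregation while RX
 * aggregation is controlled independently by IWL_DISABLE_HT_RXAGG.
 */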
851
852 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
853 do { \
854 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
855 break; \
856 iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \
857 } while (0)
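/*
 * Expansion example (editor's addition): a call such as
 *
 *	CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
 *			 "RX AGG STOP: MAC %pM tid %d\n", sta->addr, tid);
 *
 * collects the firmware debug trigger only when bit 'tid' is set in the
 * (little-endian) TID bitmap carried by the trigger TLV.
 */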
858
859 static void
860 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
861 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
862 enum ieee80211_ampdu_mlme_action action)
863 {
864 struct iwl_fw_dbg_trigger_tlv *trig;
865 struct iwl_fw_dbg_trigger_ba *ba_trig;
866
867 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
868 return;
869
870 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
871 ba_trig = (void *)trig->data;
872
873 if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
874 ieee80211_vif_to_wdev(vif), trig))
875 return;
876
877 switch (action) {
878 case IEEE80211_AMPDU_TX_OPERATIONAL: {
879 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
880 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
881
882 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
883 "TX AGG START: MAC %pM tid %d ssn %d\n",
884 sta->addr, tid, tid_data->ssn);
885 break;
886 }
887 case IEEE80211_AMPDU_TX_STOP_CONT:
888 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
889 "TX AGG STOP: MAC %pM tid %d\n",
890 sta->addr, tid);
891 break;
892 case IEEE80211_AMPDU_RX_START:
893 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
894 "RX AGG START: MAC %pM tid %d ssn %d\n",
895 sta->addr, tid, rx_ba_ssn);
896 break;
897 case IEEE80211_AMPDU_RX_STOP:
898 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
899 "RX AGG STOP: MAC %pM tid %d\n",
900 sta->addr, tid);
901 break;
902 default:
903 break;
904 }
905 }
906
907 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
908 struct ieee80211_vif *vif,
909 struct ieee80211_ampdu_params *params)
910 {
911 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
912 int ret;
913 bool tx_agg_ref = false;
914 struct ieee80211_sta *sta = params->sta;
915 enum ieee80211_ampdu_mlme_action action = params->action;
916 u16 tid = params->tid;
917 u16 *ssn = ¶ms->ssn;
918 u16 buf_size = params->buf_size;
919 bool amsdu = params->amsdu;
920 u16 timeout = params->timeout;
921
922 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
923 sta->addr, tid, action);
924
925 if (!(mvm->nvm_data->sku_cap_11n_enable))
926 return -EACCES;
927
928 /* return from D0i3 before starting a new Tx aggregation */
929 switch (action) {
930 case IEEE80211_AMPDU_TX_START:
931 case IEEE80211_AMPDU_TX_STOP_CONT:
932 case IEEE80211_AMPDU_TX_STOP_FLUSH:
933 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
934 case IEEE80211_AMPDU_TX_OPERATIONAL:
935 /*
936 * For TX start, wait synchronously until D0i3 exit to
937 * get the correct sequence number for the TID.
938 * Additionally, some other AMPDU actions use direct
939 * target access, which is not handled automatically
940 * by the trans layer (unlike commands), so wait for
941 * D0i3 exit in these cases as well.
942 */
943 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
944 if (ret)
945 return ret;
946
947 tx_agg_ref = true;
948 break;
949 default:
950 break;
951 }
952
953 mutex_lock(&mvm->mutex);
954
955 switch (action) {
956 case IEEE80211_AMPDU_RX_START:
957 if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id ==
958 iwl_mvm_sta_from_mac80211(sta)->sta_id) {
959 struct iwl_mvm_vif *mvmvif;
960 u16 macid = iwl_mvm_vif_from_mac80211(vif)->id;
961 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid];
962
963 mdata->opened_rx_ba_sessions = true;
964 mvmvif = iwl_mvm_vif_from_mac80211(vif);
965 cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk);
966 }
967 if (!iwl_enable_rx_ampdu(mvm->cfg)) {
968 ret = -EINVAL;
969 break;
970 }
971 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
972 timeout);
973 break;
974 case IEEE80211_AMPDU_RX_STOP:
975 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
976 timeout);
977 break;
978 case IEEE80211_AMPDU_TX_START:
979 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
980 ret = -EINVAL;
981 break;
982 }
983 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
984 break;
985 case IEEE80211_AMPDU_TX_STOP_CONT:
986 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
987 break;
988 case IEEE80211_AMPDU_TX_STOP_FLUSH:
989 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
990 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
991 break;
992 case IEEE80211_AMPDU_TX_OPERATIONAL:
993 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
994 buf_size, amsdu);
995 break;
996 default:
997 WARN_ON_ONCE(1);
998 ret = -EINVAL;
999 break;
1000 }
1001
1002 if (!ret) {
1003 u16 rx_ba_ssn = 0;
1004
1005 if (action == IEEE80211_AMPDU_RX_START)
1006 rx_ba_ssn = *ssn;
1007
1008 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
1009 rx_ba_ssn, action);
1010 }
1011 mutex_unlock(&mvm->mutex);
1012
1013 /*
1014 * If the tid is marked as started, we won't use it for offloaded
1015 * traffic on the next D0i3 entry. It's safe to unref.
1016 */
1017 if (tx_agg_ref)
1018 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
1019
1020 return ret;
1021 }
1022
1023 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
1024 struct ieee80211_vif *vif)
1025 {
1026 struct iwl_mvm *mvm = data;
1027 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1028
1029 mvmvif->uploaded = false;
1030 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1031
1032 spin_lock_bh(&mvm->time_event_lock);
1033 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
1034 spin_unlock_bh(&mvm->time_event_lock);
1035
1036 mvmvif->phy_ctxt = NULL;
1037 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
1038 }
1039
1040 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1041 {
1042 /* clear the D3 reconfig flag; we only need it to avoid dumping a
1043 * firmware coredump on reconfiguration, and we shouldn't do that
1044 * on a D3->D0 transition
1045 */
1046 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1047 mvm->fwrt.dump.desc = &iwl_dump_desc_assert;
1048 iwl_fw_error_dump(&mvm->fwrt);
1049 }
1050
1051 /* cleanup all stale references (scan, roc), but keep the
1052 * ucode_down ref until reconfig is complete
1053 */
1054 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1055
1056 iwl_mvm_stop_device(mvm);
1057
1058 mvm->scan_status = 0;
1059 mvm->ps_disabled = false;
1060 mvm->calibrating = false;
1061
1062 /* just in case one was running */
1063 iwl_mvm_cleanup_roc_te(mvm);
1064 ieee80211_remain_on_channel_expired(mvm->hw);
1065
1066 /*
1067 * cleanup all interfaces, even inactive ones, as some might have
1068 * gone down during the HW restart
1069 */
1070 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
1071
1072 mvm->p2p_device_vif = NULL;
1073 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
1074
1075 iwl_mvm_reset_phy_ctxts(mvm);
1076 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
1077 memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
1078 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1079 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1080
1081 ieee80211_wake_queues(mvm->hw);
1082
1083 /* clear any stale d0i3 state */
1084 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1085
1086 mvm->vif_count = 0;
1087 mvm->rx_ba_sessions = 0;
1088 mvm->fwrt.dump.conf = FW_DBG_INVALID;
1089 mvm->monitor_on = false;
1090
1091 /* keep statistics ticking */
1092 iwl_mvm_accu_radio_stats(mvm);
1093 }
1094
1095 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1096 {
1097 int ret;
1098
1099 lockdep_assert_held(&mvm->mutex);
1100
1101 if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
1102 /*
1103 * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
1104 * so later code will - from now on - see that we're doing it.
1105 */
1106 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1107 clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
1108 /* Clean up some internal and mac80211 state on restart */
1109 iwl_mvm_restart_cleanup(mvm);
1110 } else {
1111 /* Hold the reference to prevent runtime suspend while
1112 * the start procedure runs. It's a bit confusing
1113 * that the UCODE_DOWN reference is taken, but it just
1114 * means "UCODE is not UP yet". (TODO: rename this
1115 * reference).
1116 */
1117 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1118 }
1119 ret = iwl_mvm_up(mvm);
1120
1121 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1122 /* Something went wrong - we need to finish some cleanup
1123 * that normally iwl_mvm_mac_restart_complete() below
1124 * would do.
1125 */
1126 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1127 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1128 }
1129
1130 return ret;
1131 }
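/*
 * Call-path note (editor's addition): mac80211's start() callback maps to
 * iwl_mvm_mac_start() below, which takes mvm->mutex and calls this
 * function; the actual firmware load and bring-up happen in iwl_mvm_up().
 */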
1132
1133 static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1134 {
1135 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1136 int ret;
1137
1138 /* Some hw restart cleanups must not hold the mutex */
1139 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1140 /*
1141 * Make sure we are out of d0i3. This is needed
1142 * to make sure the reference accounting is correct
1143 * (and there is no stale d0i3_exit_work).
1144 */
1145 wait_event_timeout(mvm->d0i3_exit_waitq,
1146 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1147 &mvm->status),
1148 HZ);
1149 }
1150
1151 mutex_lock(&mvm->mutex);
1152 ret = __iwl_mvm_mac_start(mvm);
1153 mutex_unlock(&mvm->mutex);
1154
1155 return ret;
1156 }
1157
1158 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1159 {
1160 int ret;
1161
1162 mutex_lock(&mvm->mutex);
1163
1164 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1165 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1166 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1167 if (ret)
1168 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1169 ret);
1170
1171 /* allow transport/FW low power modes */
1172 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1173
1174 /*
1175 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1176 * of packets the FW sent out, so we must reconnect.
1177 */
1178 iwl_mvm_teardown_tdls_peers(mvm);
1179
1180 mutex_unlock(&mvm->mutex);
1181 }
1182
1183 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1184 {
1185 if (iwl_mvm_is_d0i3_supported(mvm) &&
1186 iwl_mvm_enter_d0i3_on_suspend(mvm))
1187 WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq,
1188 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1189 &mvm->status),
1190 HZ),
1191 "D0i3 exit on resume timed out\n");
1192 }
1193
1194 static void
1195 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1196 enum ieee80211_reconfig_type reconfig_type)
1197 {
1198 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1199
1200 switch (reconfig_type) {
1201 case IEEE80211_RECONFIG_TYPE_RESTART:
1202 iwl_mvm_restart_complete(mvm);
1203 break;
1204 case IEEE80211_RECONFIG_TYPE_SUSPEND:
1205 iwl_mvm_resume_complete(mvm);
1206 break;
1207 }
1208 }
1209
1210 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1211 {
1212 lockdep_assert_held(&mvm->mutex);
1213
1214 /* firmware counters are obviously reset now, but we shouldn't
1215 * track them only partially, so also clear the fw_reset_accu counters.
1216 */
1217 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1218
1219 /* async_handlers_wk is now blocked */
1220
1221 /*
1222 * The work item could be running or queued if the
1223 * ROC time event stops just as we get here.
1224 */
1225 flush_work(&mvm->roc_done_wk);
1226
1227 iwl_mvm_stop_device(mvm);
1228
1229 iwl_mvm_async_handlers_purge(mvm);
1230 /* async_handlers_list is empty and will stay empty: HW is stopped */
1231
1232 /* the fw is stopped, the aux sta is dead: clean up driver state */
1233 iwl_mvm_del_aux_sta(mvm);
1234
1235 /*
1236 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1237 * won't be called in this case).
1238 * But make sure to cleanup interfaces that have gone down before/during
1239 * HW restart was requested.
1240 */
1241 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1242 ieee80211_iterate_interfaces(mvm->hw, 0,
1243 iwl_mvm_cleanup_iterator, mvm);
1244
1245 /* We shouldn't have any UIDs still set. Loop over all the UIDs to
1246 * make sure there's nothing left there and warn if any is found.
1247 */
1248 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1249 int i;
1250
1251 for (i = 0; i < mvm->max_scans; i++) {
1252 if (WARN_ONCE(mvm->scan_uid_status[i],
1253 "UMAC scan UID %d status was not cleaned\n",
1254 i))
1255 mvm->scan_uid_status[i] = 0;
1256 }
1257 }
1258 }
1259
1260 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1261 {
1262 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1263
1264 flush_work(&mvm->d0i3_exit_work);
1265 flush_work(&mvm->async_handlers_wk);
1266 flush_work(&mvm->add_stream_wk);
1267
1268 /*
1269 * Lock and clear the firmware running bit here already, so that
1270 * new commands coming in elsewhere, e.g. from debugfs, will not
1271 * be able to proceed. This is important here because one of those
1272 * debugfs files causes the firmware dump to be triggered, and if we
1273 * don't stop debugfs accesses before canceling that it could be
1274 * retriggered after we flush it but before we've cleared the bit.
1275 */
1276 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
1277
1278 iwl_fw_cancel_dump(&mvm->fwrt);
1279 cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
1280 cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
1281 iwl_fw_free_dump_desc(&mvm->fwrt);
1282
1283 mutex_lock(&mvm->mutex);
1284 __iwl_mvm_mac_stop(mvm);
1285 mutex_unlock(&mvm->mutex);
1286
1287 /*
1288 * The worker might have been waiting for the mutex, let it run and
1289 * discover that its list is now empty.
1290 */
1291 cancel_work_sync(&mvm->async_handlers_wk);
1292 }
1293
1294 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1295 {
1296 u16 i;
1297
1298 lockdep_assert_held(&mvm->mutex);
1299
1300 for (i = 0; i < NUM_PHY_CTX; i++)
1301 if (!mvm->phy_ctxts[i].ref)
1302 return &mvm->phy_ctxts[i];
1303
1304 IWL_ERR(mvm, "No available PHY context\n");
1305 return NULL;
1306 }
1307
1308 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1309 s16 tx_power)
1310 {
1311 struct iwl_dev_tx_power_cmd cmd = {
1312 .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1313 .v3.mac_context_id =
1314 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1315 .v3.pwr_restriction = cpu_to_le16(8 * tx_power),
1316 };
1317 int len = sizeof(cmd);
1318
1319 if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1320 cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1321
1322 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
1323 len = sizeof(cmd.v3);
1324
1325 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1326 }
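/*
 * Units note (editor's addition, hedged): pwr_restriction appears to be
 * encoded in 1/8 dBm steps, so a requested tx_power of e.g. 20 dBm becomes
 * cpu_to_le16(8 * 20) = 160, while IWL_DEFAULT_MAX_TX_POWER is translated
 * to the firmware's IWL_DEV_MAX_TX_POWER ("no restriction") value.
 */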
1327
1328 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1329 struct ieee80211_vif *vif)
1330 {
1331 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1332 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1333 int ret;
1334
1335 mvmvif->mvm = mvm;
1336
1337 /*
1338 * make sure D0i3 exit is completed, otherwise a target access
1339 * during tx queue configuration could be done when still in
1340 * D0i3 state.
1341 */
1342 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1343 if (ret)
1344 return ret;
1345
1346 /*
1347 * Not much to do here. The stack will not allow interface
1348 * types or combinations that we didn't advertise, so we
1349 * don't really have to check the types.
1350 */
1351
1352 mutex_lock(&mvm->mutex);
1353
1354 /* make sure that beacon statistics don't go backwards with FW reset */
1355 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1356 mvmvif->beacon_stats.accu_num_beacons +=
1357 mvmvif->beacon_stats.num_beacons;
1358
1359 /* Allocate resources for the MAC context, and add it to the fw */
1360 ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1361 if (ret)
1362 goto out_unlock;
1363
1364 /* Counting number of interfaces is needed for legacy PM */
1365 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1366 mvm->vif_count++;
1367
1368 /*
1369 * The AP binding flow can be done only after the beacon
1370 * template is configured (which happens only in the mac80211
1371 * start_ap() flow), and adding the broadcast station can happen
1372 * only after the binding.
1373 * In addition, since modifying the MAC before adding a bcast
1374 * station is not allowed by the FW, delay the adding of MAC context to
1375 * the point where we can also add the bcast station.
1376 * In short: there's not much we can do at this point, other than
1377 * allocating resources :)
1378 */
1379 if (vif->type == NL80211_IFTYPE_AP ||
1380 vif->type == NL80211_IFTYPE_ADHOC) {
1381 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1382 if (ret) {
1383 IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1384 goto out_release;
1385 }
1386
1387 /*
1388 * The only queue for this station is the mcast queue,
1389 * which shouldn't be in the TFD mask anyway
1390 */
1391 ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
1392 0, vif->type,
1393 IWL_STA_MULTICAST);
1394 if (ret)
1395 goto out_release;
1396
1397 iwl_mvm_vif_dbgfs_register(mvm, vif);
1398 goto out_unlock;
1399 }
1400
1401 mvmvif->features |= hw->netdev_features;
1402
1403 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1404 if (ret)
1405 goto out_release;
1406
1407 ret = iwl_mvm_power_update_mac(mvm);
1408 if (ret)
1409 goto out_remove_mac;
1410
1411 /* beacon filtering */
1412 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
1413 if (ret)
1414 goto out_remove_mac;
1415
1416 if (!mvm->bf_allowed_vif &&
1417 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
1418 mvm->bf_allowed_vif = mvmvif;
1419 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1420 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
1421 }
1422
1423 /*
1424 * P2P_DEVICE interface does not have a channel context assigned to it,
1425 * so a dedicated PHY context is allocated to it and the corresponding
1426 * MAC context is bound to it at this stage.
1427 */
1428 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1429
1430 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1431 if (!mvmvif->phy_ctxt) {
1432 ret = -ENOSPC;
1433 goto out_free_bf;
1434 }
1435
1436 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1437 ret = iwl_mvm_binding_add_vif(mvm, vif);
1438 if (ret)
1439 goto out_unref_phy;
1440
1441 ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
1442 if (ret)
1443 goto out_unbind;
1444
1445 /* Save a pointer to p2p device vif, so it can later be used to
1446 * update the p2p device MAC when a GO is started/stopped */
1447 mvm->p2p_device_vif = vif;
1448 }
1449
1450 iwl_mvm_tcm_add_vif(mvm, vif);
1451
1452 if (vif->type == NL80211_IFTYPE_MONITOR)
1453 mvm->monitor_on = true;
1454
1455 iwl_mvm_vif_dbgfs_register(mvm, vif);
1456 goto out_unlock;
1457
1458 out_unbind:
1459 iwl_mvm_binding_remove_vif(mvm, vif);
1460 out_unref_phy:
1461 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1462 out_free_bf:
1463 if (mvm->bf_allowed_vif == mvmvif) {
1464 mvm->bf_allowed_vif = NULL;
1465 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1466 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1467 }
1468 out_remove_mac:
1469 mvmvif->phy_ctxt = NULL;
1470 iwl_mvm_mac_ctxt_remove(mvm, vif);
1471 out_release:
1472 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1473 mvm->vif_count--;
1474 out_unlock:
1475 mutex_unlock(&mvm->mutex);
1476
1477 iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1478
1479 return ret;
1480 }
1481
1482 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1483 struct ieee80211_vif *vif)
1484 {
1485 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1486 /*
1487 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1488 * We assume here that all the packets sent to the OFFCHANNEL
1489 * queue are sent in ROC session.
1490 */
1491 flush_work(&mvm->roc_done_wk);
1492 }
1493 }
1494
1495 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1496 struct ieee80211_vif *vif)
1497 {
1498 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1499 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1500
1501 iwl_mvm_prepare_mac_removal(mvm, vif);
1502
1503 if (!(vif->type == NL80211_IFTYPE_AP ||
1504 vif->type == NL80211_IFTYPE_ADHOC))
1505 iwl_mvm_tcm_rm_vif(mvm, vif);
1506
1507 mutex_lock(&mvm->mutex);
1508
1509 if (mvm->bf_allowed_vif == mvmvif) {
1510 mvm->bf_allowed_vif = NULL;
1511 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1512 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1513 }
1514
1515 iwl_mvm_vif_dbgfs_clean(mvm, vif);
1516
1517 /*
1518 * For AP/GO interfaces, the teardown of the resources allocated to the
1519 * interface is handled as part of the stop_ap flow.
1520 */
1521 if (vif->type == NL80211_IFTYPE_AP ||
1522 vif->type == NL80211_IFTYPE_ADHOC) {
1523 #ifdef CONFIG_NL80211_TESTMODE
1524 if (vif == mvm->noa_vif) {
1525 mvm->noa_vif = NULL;
1526 mvm->noa_duration = 0;
1527 }
1528 #endif
1529 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta);
1530 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1531 goto out_release;
1532 }
1533
1534 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1535 mvm->p2p_device_vif = NULL;
1536 iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
1537 iwl_mvm_binding_remove_vif(mvm, vif);
1538 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1539 mvmvif->phy_ctxt = NULL;
1540 }
1541
1542 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
1543 mvm->vif_count--;
1544
1545 iwl_mvm_power_update_mac(mvm);
1546 iwl_mvm_mac_ctxt_remove(mvm, vif);
1547
1548 if (vif->type == NL80211_IFTYPE_MONITOR)
1549 mvm->monitor_on = false;
1550
1551 out_release:
1552 mutex_unlock(&mvm->mutex);
1553 }
1554
1555 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1556 {
1557 return 0;
1558 }
1559
1560 struct iwl_mvm_mc_iter_data {
1561 struct iwl_mvm *mvm;
1562 int port_id;
1563 };
1564
1565 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1566 struct ieee80211_vif *vif)
1567 {
1568 struct iwl_mvm_mc_iter_data *data = _data;
1569 struct iwl_mvm *mvm = data->mvm;
1570 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1571 struct iwl_host_cmd hcmd = {
1572 .id = MCAST_FILTER_CMD,
1573 .flags = CMD_ASYNC,
1574 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1575 };
1576 int ret, len;
1577
1578 /* if we don't have free ports, mcast frames will be dropped */
1579 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1580 return;
1581
1582 if (vif->type != NL80211_IFTYPE_STATION ||
1583 !vif->bss_conf.assoc)
1584 return;
1585
1586 cmd->port_id = data->port_id++;
1587 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1588 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1589
1590 hcmd.len[0] = len;
1591 hcmd.data[0] = cmd;
1592
1593 ret = iwl_mvm_send_cmd(mvm, &hcmd);
1594 if (ret)
1595 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1596 }
1597
1598 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1599 {
1600 struct iwl_mvm_mc_iter_data iter_data = {
1601 .mvm = mvm,
1602 };
1603
1604 lockdep_assert_held(&mvm->mutex);
1605
1606 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1607 return;
1608
1609 ieee80211_iterate_active_interfaces_atomic(
1610 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1611 iwl_mvm_mc_iface_iterator, &iter_data);
1612 }
1613
1614 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1615 struct netdev_hw_addr_list *mc_list)
1616 {
1617 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1618 struct iwl_mcast_filter_cmd *cmd;
1619 struct netdev_hw_addr *addr;
1620 int addr_count;
1621 bool pass_all;
1622 int len;
1623
1624 addr_count = netdev_hw_addr_list_count(mc_list);
1625 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1626 IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1627 if (pass_all)
1628 addr_count = 0;
1629
1630 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1631 cmd = kzalloc(len, GFP_ATOMIC);
1632 if (!cmd)
1633 return 0;
1634
1635 if (pass_all) {
1636 cmd->pass_all = 1;
1637 return (u64)(unsigned long)cmd;
1638 }
1639
1640 netdev_hw_addr_list_for_each(addr, mc_list) {
1641 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1642 cmd->count, addr->addr);
1643 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1644 addr->addr, ETH_ALEN);
1645 cmd->count++;
1646 }
1647
1648 return (u64)(unsigned long)cmd;
1649 }
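/*
 * Editor's note: mac80211's prepare_multicast() returns an opaque u64
 * cookie, so the allocated command is smuggled through as
 * (u64)(unsigned long)cmd and cast back in iwl_mvm_configure_filter(),
 * which stores it in mvm->mcast_filter_cmd and frees any previously
 * stored command.
 */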
1650
1651 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1652 unsigned int changed_flags,
1653 unsigned int *total_flags,
1654 u64 multicast)
1655 {
1656 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1657 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1658
1659 mutex_lock(&mvm->mutex);
1660
1661 /* replace previous configuration */
1662 kfree(mvm->mcast_filter_cmd);
1663 mvm->mcast_filter_cmd = cmd;
1664
1665 if (!cmd)
1666 goto out;
1667
1668 if (changed_flags & FIF_ALLMULTI)
1669 cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
1670
1671 if (cmd->pass_all)
1672 cmd->count = 0;
1673
1674 iwl_mvm_recalc_multicast(mvm);
1675 out:
1676 mutex_unlock(&mvm->mutex);
1677 *total_flags = 0;
1678 }
1679
1680 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
1681 struct ieee80211_vif *vif,
1682 unsigned int filter_flags,
1683 unsigned int changed_flags)
1684 {
1685 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1686
1687 /* We support only filter for probe requests */
1688 if (!(changed_flags & FIF_PROBE_REQ))
1689 return;
1690
1691 /* Supported only for p2p client interfaces */
1692 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
1693 !vif->p2p)
1694 return;
1695
1696 mutex_lock(&mvm->mutex);
1697 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1698 mutex_unlock(&mvm->mutex);
1699 }
1700
1701 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1702 struct iwl_bcast_iter_data {
1703 struct iwl_mvm *mvm;
1704 struct iwl_bcast_filter_cmd *cmd;
1705 u8 current_filter;
1706 };
1707
1708 static void
1709 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
1710 const struct iwl_fw_bcast_filter *in_filter,
1711 struct iwl_fw_bcast_filter *out_filter)
1712 {
1713 struct iwl_fw_bcast_filter_attr *attr;
1714 int i;
1715
1716 memcpy(out_filter, in_filter, sizeof(*out_filter));
1717
1718 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
1719 attr = &out_filter->attrs[i];
1720
1721 if (!attr->mask)
1722 break;
1723
1724 switch (attr->reserved1) {
1725 case cpu_to_le16(BC_FILTER_MAGIC_IP):
1726 if (vif->bss_conf.arp_addr_cnt != 1) {
1727 attr->mask = 0;
1728 continue;
1729 }
1730
1731 attr->val = vif->bss_conf.arp_addr_list[0];
1732 break;
1733 case cpu_to_le16(BC_FILTER_MAGIC_MAC):
1734 attr->val = *(__be32 *)&vif->addr[2];
1735 break;
1736 default:
1737 break;
1738 }
1739 attr->reserved1 = 0;
1740 out_filter->num_attrs++;
1741 }
1742 }
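/*
 * Example (editor's addition): for BC_FILTER_MAGIC_MAC the attribute value
 * becomes the last four bytes of the interface address -- for a vif MAC of
 * 00:11:22:33:44:55 the filter matches on the big-endian word 0x22334455
 * taken from &vif->addr[2].
 */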
1743
1744 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
1745 struct ieee80211_vif *vif)
1746 {
1747 struct iwl_bcast_iter_data *data = _data;
1748 struct iwl_mvm *mvm = data->mvm;
1749 struct iwl_bcast_filter_cmd *cmd = data->cmd;
1750 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1751 struct iwl_fw_bcast_mac *bcast_mac;
1752 int i;
1753
1754 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
1755 return;
1756
1757 bcast_mac = &cmd->macs[mvmvif->id];
1758
1759 /*
1760 * enable filtering only for associated stations, but not for P2P
1761 * Clients
1762 */
1763 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
1764 !vif->bss_conf.assoc)
1765 return;
1766
1767 bcast_mac->default_discard = 1;
1768
1769 /* copy all configured filters */
1770 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
1771 /*
1772 * Make sure we don't exceed our filters limit.
1773 * if there is still a valid filter to be configured,
1774 * be on the safe side and just allow bcast for this mac.
1775 */
1776 if (WARN_ON_ONCE(data->current_filter >=
1777 ARRAY_SIZE(cmd->filters))) {
1778 bcast_mac->default_discard = 0;
1779 bcast_mac->attached_filters = 0;
1780 break;
1781 }
1782
1783 iwl_mvm_set_bcast_filter(vif,
1784 &mvm->bcast_filters[i],
1785 &cmd->filters[data->current_filter]);
1786
1787 /* skip current filter if it contains no attributes */
1788 if (!cmd->filters[data->current_filter].num_attrs)
1789 continue;
1790
1791 /* attach the filter to current mac */
1792 bcast_mac->attached_filters |=
1793 cpu_to_le16(BIT(data->current_filter));
1794
1795 data->current_filter++;
1796 }
1797 }
1798
1799 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
1800 struct iwl_bcast_filter_cmd *cmd)
1801 {
1802 struct iwl_bcast_iter_data iter_data = {
1803 .mvm = mvm,
1804 .cmd = cmd,
1805 };
1806
1807 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
1808 return false;
1809
1810 memset(cmd, 0, sizeof(*cmd));
1811 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
1812 cmd->max_macs = ARRAY_SIZE(cmd->macs);
1813
1814 #ifdef CONFIG_IWLWIFI_DEBUGFS
1815 /* use debugfs filters/macs if override is configured */
1816 if (mvm->dbgfs_bcast_filtering.override) {
1817 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
1818 sizeof(cmd->filters));
1819 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
1820 sizeof(cmd->macs));
1821 return true;
1822 }
1823 #endif
1824
1825 /* if no filters are configured, do nothing */
1826 if (!mvm->bcast_filters)
1827 return false;
1828
1829 /* configure and attach these filters for each associated sta vif */
1830 ieee80211_iterate_active_interfaces(
1831 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1832 iwl_mvm_bcast_filter_iterator, &iter_data);
1833
1834 return true;
1835 }
1836
1837 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1838 {
1839 struct iwl_bcast_filter_cmd cmd;
1840
1841 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
1842 return 0;
1843
1844 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1845 return 0;
1846
1847 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
1848 sizeof(cmd), &cmd);
1849 }
1850 #else
1851 static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1852 {
1853 return 0;
1854 }
1855 #endif
1856
1857 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
1858 struct ieee80211_vif *vif)
1859 {
1860 struct iwl_mu_group_mgmt_cmd cmd = {};
1861
1862 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
1863 WLAN_MEMBERSHIP_LEN);
1864 memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
1865 WLAN_USER_POSITION_LEN);
1866
1867 return iwl_mvm_send_cmd_pdu(mvm,
1868 WIDE_ID(DATA_PATH_GROUP,
1869 UPDATE_MU_GROUPS_CMD),
1870 0, sizeof(cmd), &cmd);
1871 }
1872
1873 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
1874 struct ieee80211_vif *vif)
1875 {
1876 if (vif->mu_mimo_owner) {
1877 struct iwl_mu_group_mgmt_notif *notif = _data;
1878
1879 /*
1880 * MU-MIMO Group Id action frame is little endian. We treat
1881 * the data received from firmware as if it came from the
1882 * action frame, so no conversion is needed.
1883 */
1884 ieee80211_update_mu_groups(vif,
1885 (u8 *)&notif->membership_status,
1886 (u8 *)&notif->user_position);
1887 }
1888 }
1889
1890 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
1891 struct iwl_rx_cmd_buffer *rxb)
1892 {
1893 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1894 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
1895
1896 ieee80211_iterate_active_interfaces_atomic(
1897 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1898 iwl_mvm_mu_mimo_iface_iterator, notif);
1899 }
1900
1901 static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
1902 {
1903 u8 byte_num = ppe_pos_bit / 8;
1904 u8 bit_num = ppe_pos_bit % 8;
1905 u8 residue_bits;
1906 u8 res;
1907
1908 if (bit_num <= 5)
1909 return (ppe[byte_num] >> bit_num) &
1910 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
1911
1912 /*
1913 * If bit_num > 5, we have to combine bits with next byte.
1914 * Calculate how many bits we need to take from current byte (called
1915 * here "residue_bits"), and add them to bits from next byte.
1916 */
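/*
 * Worked example, assuming the usual 3-bit PPET fields: for
 * ppe_pos_bit = 14 we get byte_num = 1 and bit_num = 6, so
 * residue_bits = 2; bits 6-7 of byte 1 become the two low bits of the
 * result and bit 0 of byte 2 becomes its most significant bit.
 */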
1917
1918 residue_bits = 8 - bit_num;
1919
1920 res = (ppe[byte_num + 1] &
1921 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
1922 residue_bits;
1923 res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
1924
1925 return res;
1926 }
1927
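/*
 * Build and send STA_HE_CTXT_CMD for an HE-capable station: translate
 * the peer's HE MAC capabilities into HTC flags, unpack the PPE
 * thresholds into the per-NSS/per-BW packet extension table, copy the
 * MU EDCA parameters per AC and mirror BSS color / UORA / BA-bitmap
 * settings from the vif's bss_conf.
 */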
1928 static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
1929 struct ieee80211_vif *vif, u8 sta_id)
1930 {
1931 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1932 struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
1933 .sta_id = sta_id,
1934 .tid_limit = IWL_MAX_TID_COUNT,
1935 .bss_color = vif->bss_conf.bss_color,
1936 .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
1937 .frame_time_rts_th =
1938 cpu_to_le16(vif->bss_conf.frame_time_rts_th),
1939 };
1940 struct ieee80211_sta *sta;
1941 u32 flags;
1942 int i;
1943
1944 rcu_read_lock();
1945
1946 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
1947 if (IS_ERR(sta)) {
1948 rcu_read_unlock();
1949 WARN(1, "Can't find STA to configure HE\n");
1950 return;
1951 }
1952
1953 if (!sta->he_cap.has_he) {
1954 rcu_read_unlock();
1955 return;
1956 }
1957
1958 flags = 0;
1959
1960 /* HTC flags */
1961 if (sta->he_cap.he_cap_elem.mac_cap_info[0] &
1962 IEEE80211_HE_MAC_CAP0_HTC_HE)
1963 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
1964 if ((sta->he_cap.he_cap_elem.mac_cap_info[1] &
1965 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
1966 (sta->he_cap.he_cap_elem.mac_cap_info[2] &
1967 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
1968 u8 link_adap =
1969 ((sta->he_cap.he_cap_elem.mac_cap_info[2] &
1970 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
1971 (sta->he_cap.he_cap_elem.mac_cap_info[1] &
1972 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
1973
1974 if (link_adap == 2)
1975 sta_ctxt_cmd.htc_flags |=
1976 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED);
1977 else if (link_adap == 3)
1978 sta_ctxt_cmd.htc_flags |=
1979 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
1980 }
1981 if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
1982 IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED)
1983 sta_ctxt_cmd.htc_flags |=
1984 cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED);
1985 if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
1986 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
1987 if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
1988 IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
1989 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
1990 if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
1991 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
1992
1993 /* If PPE Thresholds exist, parse them into a FW-familiar format */
1994 if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
1995 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
1996 u8 nss = (sta->he_cap.ppe_thres[0] &
1997 IEEE80211_PPE_THRES_NSS_MASK) + 1;
1998 u8 ru_index_bitmap =
1999 (sta->he_cap.ppe_thres[0] &
2000 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
2001 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
2002 u8 *ppe = &sta->he_cap.ppe_thres[0];
2003 u8 ppe_pos_bit = 7; /* Starting after PPE header */
2004
2005 /*
2006 * FW currently supports only nss == MAX_HE_SUPP_NSS
2007 *
2008 * If nss > MAX: we can ignore values we don't support
2009 * If nss < MAX: we can set zeros in other streams
2010 */
2011 if (nss > MAX_HE_SUPP_NSS) {
2012 IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
2013 MAX_HE_SUPP_NSS);
2014 nss = MAX_HE_SUPP_NSS;
2015 }
2016
2017 for (i = 0; i < nss; i++) {
2018 u8 ru_index_tmp = ru_index_bitmap << 1;
2019 u8 bw;
2020
2021 for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
2022 ru_index_tmp >>= 1;
2023 if (!(ru_index_tmp & 1))
2024 continue;
2025
2026 sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
2027 iwl_mvm_he_get_ppe_val(ppe,
2028 ppe_pos_bit);
2029 ppe_pos_bit +=
2030 IEEE80211_PPE_THRES_INFO_PPET_SIZE;
2031 sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
2032 iwl_mvm_he_get_ppe_val(ppe,
2033 ppe_pos_bit);
2034 ppe_pos_bit +=
2035 IEEE80211_PPE_THRES_INFO_PPET_SIZE;
2036 }
2037 }
2038
2039 flags |= STA_CTXT_HE_PACKET_EXT;
2040 }
2041 rcu_read_unlock();
2042
2043 /* Mark MU EDCA as enabled, unless it is missing on some AC */
2044 flags |= STA_CTXT_HE_MU_EDCA_CW;
2045 for (i = 0; i < AC_NUM; i++) {
2046 struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
2047 &mvmvif->queue_params[i].mu_edca_param_rec;
2048
2049 if (!mvmvif->queue_params[i].mu_edca) {
2050 flags &= ~STA_CTXT_HE_MU_EDCA_CW;
2051 break;
2052 }
2053
2054 sta_ctxt_cmd.trig_based_txf[i].cwmin =
2055 cpu_to_le16(mu_edca->ecw_min_max & 0xf);
2056 sta_ctxt_cmd.trig_based_txf[i].cwmax =
2057 cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
2058 sta_ctxt_cmd.trig_based_txf[i].aifsn =
2059 cpu_to_le16(mu_edca->aifsn);
2060 sta_ctxt_cmd.trig_based_txf[i].mu_time =
2061 cpu_to_le16(mu_edca->mu_edca_timer);
2062 }
2063
2064 if (vif->bss_conf.multi_sta_back_32bit)
2065 flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
2066
2067 if (vif->bss_conf.ack_enabled)
2068 flags |= STA_CTXT_HE_ACK_ENABLED;
2069
2070 if (vif->bss_conf.uora_exists) {
2071 flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
2072
2073 sta_ctxt_cmd.rand_alloc_ecwmin =
2074 vif->bss_conf.uora_ocw_range & 0x7;
2075 sta_ctxt_cmd.rand_alloc_ecwmax =
2076 (vif->bss_conf.uora_ocw_range >> 3) & 0x7;
2077 }
2078
2079 /* TODO: support Multi BSSID IE */
2080
2081 sta_ctxt_cmd.flags = cpu_to_le32(flags);
2082
2083 if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
2084 DATA_PATH_GROUP, 0),
2085 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
2086 IWL_ERR(mvm, "Failed to config FW to work HE!\n");
2087 }
2088
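/*
 * Handle BSS_CHANGED_* notifications for a client (station) interface:
 * update the MAC context and BSSID, and on association/disassociation
 * adjust quotas, power, SMPS, SF state, beacon filtering and the
 * multicast/broadcast filters accordingly.
 */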
2089 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
2090 struct ieee80211_vif *vif,
2091 struct ieee80211_bss_conf *bss_conf,
2092 u32 changes)
2093 {
2094 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2095 int ret;
2096
2097 /*
2098 * Re-calculate the tsf id, as the master-slave relations depend on the
2099 * beacon interval, which was not known when the station interface was
2100 * added.
2101 */
2102 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
2103 if (vif->bss_conf.he_support &&
2104 !iwlwifi_mod_params.disable_11ax)
2105 iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
2106
2107 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2108 }
2109
2110 /*
2111 * If we're not associated yet, take the (new) BSSID before associating
2112 * so the firmware knows. If we're already associated, then use the old
2113 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
2114 * branch for disassociation below.
2115 */
2116 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
2117 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2118
2119 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
2120 if (ret)
2121 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2122
2123 /* after sending it once, adopt mac80211 data */
2124 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2125 mvmvif->associated = bss_conf->assoc;
2126
2127 if (changes & BSS_CHANGED_ASSOC) {
2128 if (bss_conf->assoc) {
2129 /* clear statistics to get clean beacon counter */
2130 iwl_mvm_request_statistics(mvm, true);
2131 memset(&mvmvif->beacon_stats, 0,
2132 sizeof(mvmvif->beacon_stats));
2133
2134 /* add quota for this interface */
2135 ret = iwl_mvm_update_quotas(mvm, true, NULL);
2136 if (ret) {
2137 IWL_ERR(mvm, "failed to update quotas\n");
2138 return;
2139 }
2140
2141 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2142 &mvm->status)) {
2143 /*
2144 * If we're restarting then the firmware will
2145 * obviously have lost synchronisation with
2146 * the AP. It will attempt to synchronise by
2147 * itself, but we can make it more reliable by
2148 * scheduling a session protection time event.
2149 *
2150 * The firmware needs to receive a beacon to
2151 * catch up with synchronisation, use 110% of
2152 * the beacon interval.
2153 *
2154 * Set a large maximum delay to allow for more
2155 * than a single interface.
2156 */
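/*
 * For example, with a 100 TU beacon interval the protection
 * duration below is 110 TU and the maximum delay passed to
 * iwl_mvm_protect_session() is 5 * 110 = 550 TU.
 */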
2157 u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
2158 iwl_mvm_protect_session(mvm, vif, dur, dur,
2159 5 * dur, false);
2160 }
2161
2162 iwl_mvm_sf_update(mvm, vif, false);
2163 iwl_mvm_power_vif_assoc(mvm, vif);
2164 if (vif->p2p) {
2165 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
2166 iwl_mvm_update_smps(mvm, vif,
2167 IWL_MVM_SMPS_REQ_PROT,
2168 IEEE80211_SMPS_DYNAMIC);
2169 }
2170 } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
2171 /*
2172 * If update fails - SF might be running in associated
2173 * mode while disassociated - which is forbidden.
2174 */
2175 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
2176 "Failed to update SF upon disassociation\n");
2177
2178 /*
2179 * If we get an assert during the connection (after the
2180 * station has been added, but before the vif is set
2181 * to associated), mac80211 will re-add the station and
2182 * then configure the vif. Since the vif is not
2183 * associated, we would remove the station here and
2184 * this would fail the recovery.
2185 */
2186 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2187 &mvm->status)) {
2188 /*
2189 * Remove AP station now that
2190 * the MAC is unassoc
2191 */
2192 ret = iwl_mvm_rm_sta_id(mvm, vif,
2193 mvmvif->ap_sta_id);
2194 if (ret)
2195 IWL_ERR(mvm,
2196 "failed to remove AP station\n");
2197
2198 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
2199 mvm->d0i3_ap_sta_id =
2200 IWL_MVM_INVALID_STA;
2201 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
2202 }
2203
2204 /* remove quota for this interface */
2205 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2206 if (ret)
2207 IWL_ERR(mvm, "failed to update quotas\n");
2208
2209 if (vif->p2p)
2210 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
2211
2212 /* this will take the cleared BSSID from bss_conf */
2213 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2214 if (ret)
2215 IWL_ERR(mvm,
2216 "failed to update MAC %pM (clear after unassoc)\n",
2217 vif->addr);
2218 }
2219
2220 /*
2221 * The firmware tracks the MU-MIMO group on its own.
2222 * However, on HW restart we should restore this data.
2223 */
2224 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2225 (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
2226 ret = iwl_mvm_update_mu_groups(mvm, vif);
2227 if (ret)
2228 IWL_ERR(mvm,
2229 "failed to update VHT MU_MIMO groups\n");
2230 }
2231
2232 iwl_mvm_recalc_multicast(mvm);
2233 iwl_mvm_configure_bcast_filter(mvm);
2234
2235 /* reset rssi values */
2236 mvmvif->bf_data.ave_beacon_signal = 0;
2237
2238 iwl_mvm_bt_coex_vif_change(mvm);
2239 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
2240 IEEE80211_SMPS_AUTOMATIC);
2241 if (fw_has_capa(&mvm->fw->ucode_capa,
2242 IWL_UCODE_TLV_CAPA_UMAC_SCAN))
2243 iwl_mvm_config_scan(mvm);
2244 }
2245
2246 if (changes & BSS_CHANGED_BEACON_INFO) {
2247 /*
2248 * We received a beacon from the associated AP so
2249 * remove the session protection.
2250 */
2251 iwl_mvm_stop_session_protection(mvm, vif);
2252
2253 iwl_mvm_sf_update(mvm, vif, false);
2254 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2255 }
2256
2257 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
2258 /*
2259 * Send power command on every beacon change,
2260 * because we may not have enabled beacon abort yet.
2261 */
2262 BSS_CHANGED_BEACON_INFO)) {
2263 ret = iwl_mvm_power_update_mac(mvm);
2264 if (ret)
2265 IWL_ERR(mvm, "failed to update power mode\n");
2266 }
2267
2268 if (changes & BSS_CHANGED_TXPOWER) {
2269 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2270 bss_conf->txpower);
2271 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2272 }
2273
2274 if (changes & BSS_CHANGED_CQM) {
2275 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
2276 /* reset cqm events tracking */
2277 mvmvif->bf_data.last_cqm_event = 0;
2278 if (mvmvif->bf_data.bf_enabled) {
2279 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2280 if (ret)
2281 IWL_ERR(mvm,
2282 "failed to update CQM thresholds\n");
2283 }
2284 }
2285
2286 if (changes & BSS_CHANGED_ARP_FILTER) {
2287 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2288 iwl_mvm_configure_bcast_filter(mvm);
2289 }
2290 }
2291
2292 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2293 struct ieee80211_vif *vif)
2294 {
2295 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2296 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2297 int ret;
2298
2299 /*
2300 * iwl_mvm_mac_ctxt_add() might read directly from the device
2301 * (the system time), so make sure it is available.
2302 */
2303 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2304 if (ret)
2305 return ret;
2306
2307 mutex_lock(&mvm->mutex);
2308
2309 /* Send the beacon template */
2310 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2311 if (ret)
2312 goto out_unlock;
2313
2314 /*
2315 * Re-calculate the tsf id, as the master-slave relations depend on the
2316 * beacon interval, which was not known when the AP interface was added.
2317 */
2318 if (vif->type == NL80211_IFTYPE_AP)
2319 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2320
2321 mvmvif->ap_assoc_sta_count = 0;
2322
2323 /* Add the mac context */
2324 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2325 if (ret)
2326 goto out_unlock;
2327
2328 /* Perform the binding */
2329 ret = iwl_mvm_binding_add_vif(mvm, vif);
2330 if (ret)
2331 goto out_remove;
2332
2333 /*
2334 * This is not very nice, but the simplest:
2335 * For older FWs adding the mcast sta before the bcast station may
2336 * cause assert 0x2b00.
2337 * This is fixed in later FW so make the order of addition depend on
2338 * the TLV
2339 */
2340 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2341 ret = iwl_mvm_add_mcast_sta(mvm, vif);
2342 if (ret)
2343 goto out_unbind;
2344 /*
2345 * Send the bcast station. At this stage the TBTT and DTIM time
2346 * events are added and applied to the scheduler
2347 */
2348 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2349 if (ret) {
2350 iwl_mvm_rm_mcast_sta(mvm, vif);
2351 goto out_unbind;
2352 }
2353 } else {
2354 /*
2355 * Send the bcast station. At this stage the TBTT and DTIM time
2356 * events are added and applied to the scheduler
2357 */
2358 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2359 if (ret)
2360 goto out_unbind;
2361 ret = iwl_mvm_add_mcast_sta(mvm, vif);
2362 if (ret) {
2363 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2364 goto out_unbind;
2365 }
2366 }
2367
2368 /* must be set before quota calculations */
2369 mvmvif->ap_ibss_active = true;
2370
2371 /* power update needs to be done before quotas */
2372 iwl_mvm_power_update_mac(mvm);
2373
2374 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2375 if (ret)
2376 goto out_quota_failed;
2377
2378 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2379 if (vif->p2p && mvm->p2p_device_vif)
2380 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2381
2382 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2383
2384 iwl_mvm_bt_coex_vif_change(mvm);
2385
2386 /* we don't support TDLS during DCM */
2387 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2388 iwl_mvm_teardown_tdls_peers(mvm);
2389
2390 goto out_unlock;
2391
2392 out_quota_failed:
2393 iwl_mvm_power_update_mac(mvm);
2394 mvmvif->ap_ibss_active = false;
2395 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2396 iwl_mvm_rm_mcast_sta(mvm, vif);
2397 out_unbind:
2398 iwl_mvm_binding_remove_vif(mvm, vif);
2399 out_remove:
2400 iwl_mvm_mac_ctxt_remove(mvm, vif);
2401 out_unlock:
2402 mutex_unlock(&mvm->mutex);
2403 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
2404 return ret;
2405 }
2406
2407 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2408 struct ieee80211_vif *vif)
2409 {
2410 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2411 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2412
2413 iwl_mvm_prepare_mac_removal(mvm, vif);
2414
2415 mutex_lock(&mvm->mutex);
2416
2417 /* Handle AP stop while in CSA */
2418 if (rcu_access_pointer(mvm->csa_vif) == vif) {
2419 iwl_mvm_remove_time_event(mvm, mvmvif,
2420 &mvmvif->time_event_data);
2421 RCU_INIT_POINTER(mvm->csa_vif, NULL);
2422 mvmvif->csa_countdown = false;
2423 }
2424
2425 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2426 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2427 mvm->csa_tx_block_bcn_timeout = 0;
2428 }
2429
2430 mvmvif->ap_ibss_active = false;
2431 mvm->ap_last_beacon_gp2 = 0;
2432
2433 iwl_mvm_bt_coex_vif_change(mvm);
2434
2435 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2436
2437 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2438 if (vif->p2p && mvm->p2p_device_vif)
2439 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2440
2441 iwl_mvm_update_quotas(mvm, false, NULL);
2442
2443 /*
2444 * This is not very nice, but the simplest:
2445 * For older FWs removing the mcast sta before the bcast station may
2446 * cause assert 0x2b00.
2447 * This is fixed in later FW (which will stop beaconing when removing
2448 * bcast station).
2449 * So make the order of removal depend on the TLV
2450 */
2451 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
2452 iwl_mvm_rm_mcast_sta(mvm, vif);
2453 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2454 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
2455 iwl_mvm_rm_mcast_sta(mvm, vif);
2456 iwl_mvm_binding_remove_vif(mvm, vif);
2457
2458 iwl_mvm_power_update_mac(mvm);
2459
2460 iwl_mvm_mac_ctxt_remove(mvm, vif);
2461
2462 mutex_unlock(&mvm->mutex);
2463 }
2464
2465 static void
2466 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2467 struct ieee80211_vif *vif,
2468 struct ieee80211_bss_conf *bss_conf,
2469 u32 changes)
2470 {
2471 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2472
2473 /* Changes will be applied when the AP/IBSS is started */
2474 if (!mvmvif->ap_ibss_active)
2475 return;
2476
2477 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2478 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2479 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2480 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2481
2482 /* Need to send a new beacon template to the FW */
2483 if (changes & BSS_CHANGED_BEACON &&
2484 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2485 IWL_WARN(mvm, "Failed updating beacon data\n");
2486
2487 if (changes & BSS_CHANGED_TXPOWER) {
2488 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2489 bss_conf->txpower);
2490 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2491 }
2492 }
2493
2494 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2495 struct ieee80211_vif *vif,
2496 struct ieee80211_bss_conf *bss_conf,
2497 u32 changes)
2498 {
2499 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2500
2501 /*
2502 * iwl_mvm_bss_info_changed_station() might call
2503 * iwl_mvm_protect_session(), which reads directly from
2504 * the device (the system time), so make sure it is available.
2505 */
2506 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2507 return;
2508
2509 mutex_lock(&mvm->mutex);
2510
2511 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
2512 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2513
2514 switch (vif->type) {
2515 case NL80211_IFTYPE_STATION:
2516 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2517 break;
2518 case NL80211_IFTYPE_AP:
2519 case NL80211_IFTYPE_ADHOC:
2520 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
2521 break;
2522 case NL80211_IFTYPE_MONITOR:
2523 if (changes & BSS_CHANGED_MU_GROUPS)
2524 iwl_mvm_update_mu_groups(mvm, vif);
2525 break;
2526 default:
2527 /* shouldn't happen */
2528 WARN_ON_ONCE(1);
2529 }
2530
2531 mutex_unlock(&mvm->mutex);
2532 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
2533 }
2534
2535 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2536 struct ieee80211_vif *vif,
2537 struct ieee80211_scan_request *hw_req)
2538 {
2539 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2540 int ret;
2541
2542 if (hw_req->req.n_channels == 0 ||
2543 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
2544 return -EINVAL;
2545
2546 mutex_lock(&mvm->mutex);
2547 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
2548 mutex_unlock(&mvm->mutex);
2549
2550 return ret;
2551 }
2552
2553 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2554 struct ieee80211_vif *vif)
2555 {
2556 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2557
2558 mutex_lock(&mvm->mutex);
2559
2560 /* Due to a race condition, it's possible that mac80211 asks
2561 * us to stop a hw_scan when it's already stopped. This can
2562 * happen, for instance, if we stopped the scan ourselves,
2563 * called ieee80211_scan_completed() and the userspace called
2564 * cancel scan before ieee80211_scan_work() could run.
2565 * To handle that, simply return if the scan is not running.
2566 */
2567 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
2568 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2569
2570 mutex_unlock(&mvm->mutex);
2571 }
2572
2573 static void
2574 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2575 struct ieee80211_sta *sta, u16 tids,
2576 int num_frames,
2577 enum ieee80211_frame_release_type reason,
2578 bool more_data)
2579 {
2580 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2581
2582 /* Called when we need to transmit (a) frame(s) from mac80211 */
2583
2584 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2585 tids, more_data, false);
2586 }
2587
2588 static void
2589 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2590 struct ieee80211_sta *sta, u16 tids,
2591 int num_frames,
2592 enum ieee80211_frame_release_type reason,
2593 bool more_data)
2594 {
2595 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2596
2597 /* Called when we need to transmit (a) frame(s) from agg or dqa queue */
2598
2599 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2600 tids, more_data, true);
2601 }
2602
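/*
 * Common sleep/awake handling: collect the station's active TX queues
 * and the TIDs that still have queued frames, then either mark those
 * TIDs as buffered and freeze the queue timers (sleep) or unfreeze
 * them and tell the firmware the station woke up (awake).
 */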
2603 static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2604 enum sta_notify_cmd cmd,
2605 struct ieee80211_sta *sta)
2606 {
2607 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2608 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2609 unsigned long txqs = 0, tids = 0;
2610 int tid;
2611
2612 /*
2613 * If we have TVQM then we get too high queue numbers - luckily
2614 * we really shouldn't get here with that because such hardware
2615 * should have firmware supporting buffer station offload.
2616 */
2617 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
2618 return;
2619
2620 spin_lock_bh(&mvmsta->lock);
2621 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2622 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2623
2624 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
2625 continue;
2626
2627 __set_bit(tid_data->txq_id, &txqs);
2628
2629 if (iwl_mvm_tid_queued(mvm, tid_data) == 0)
2630 continue;
2631
2632 __set_bit(tid, &tids);
2633 }
2634
2635 switch (cmd) {
2636 case STA_NOTIFY_SLEEP:
2637 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2638 ieee80211_sta_set_buffered(sta, tid, true);
2639
2640 if (txqs)
2641 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2642 /*
2643 * The fw updates the STA to be asleep. Tx packets on the Tx
2644 * queues to this station will not be transmitted. The fw will
2645 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2646 */
2647 break;
2648 case STA_NOTIFY_AWAKE:
2649 if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA))
2650 break;
2651
2652 if (txqs)
2653 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2654 iwl_mvm_sta_modify_ps_wake(mvm, sta);
2655 break;
2656 default:
2657 break;
2658 }
2659 spin_unlock_bh(&mvmsta->lock);
2660 }
2661
2662 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2663 struct ieee80211_vif *vif,
2664 enum sta_notify_cmd cmd,
2665 struct ieee80211_sta *sta)
2666 {
2667 __iwl_mvm_mac_sta_notify(hw, cmd, sta);
2668 }
2669
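/*
 * Handle a firmware power-save state notification for a station on an
 * AP interface: mirror the new sleep state into mac80211 and, while
 * the station sleeps, forward U-APSD trigger / PS-poll events so
 * buffered frames can be released.
 */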
2670 void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
2671 {
2672 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2673 struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data;
2674 struct ieee80211_sta *sta;
2675 struct iwl_mvm_sta *mvmsta;
2676 bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE);
2677
2678 if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
2679 return;
2680
2681 rcu_read_lock();
2682 sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
2683 if (WARN_ON(IS_ERR_OR_NULL(sta))) {
2684 rcu_read_unlock();
2685 return;
2686 }
2687
2688 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2689
2690 if (!mvmsta->vif ||
2691 mvmsta->vif->type != NL80211_IFTYPE_AP) {
2692 rcu_read_unlock();
2693 return;
2694 }
2695
2696 if (mvmsta->sleeping != sleeping) {
2697 mvmsta->sleeping = sleeping;
2698 __iwl_mvm_mac_sta_notify(mvm->hw,
2699 sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE,
2700 sta);
2701 ieee80211_sta_ps_transition(sta, sleeping);
2702 }
2703
2704 if (sleeping) {
2705 switch (notif->type) {
2706 case IWL_MVM_PM_EVENT_AWAKE:
2707 case IWL_MVM_PM_EVENT_ASLEEP:
2708 break;
2709 case IWL_MVM_PM_EVENT_UAPSD:
2710 ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS);
2711 break;
2712 case IWL_MVM_PM_EVENT_PS_POLL:
2713 ieee80211_sta_pspoll(sta);
2714 break;
2715 default:
2716 break;
2717 }
2718 }
2719
2720 rcu_read_unlock();
2721 }
2722
2723 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2724 struct ieee80211_vif *vif,
2725 struct ieee80211_sta *sta)
2726 {
2727 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2728 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2729
2730 /*
2731 * This is called before mac80211 does RCU synchronisation,
2732 * so here we already invalidate our internal RCU-protected
2733 * station pointer. The rest of the code will thus no longer
2734 * be able to find the station this way, and we don't rely
2735 * on further RCU synchronisation after the sta_state()
2736 * callback deleted the station.
2737 */
2738 mutex_lock(&mvm->mutex);
2739 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2740 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2741 ERR_PTR(-ENOENT));
2742
2743 mutex_unlock(&mvm->mutex);
2744 }
2745
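/*
 * Decide whether U-APSD may be advertised for this connection: it is
 * disabled when the firmware lacks U-APSD support, for a P2P client
 * when iwl_mvm_is_p2p_scm_uapsd_supported() says it is not supported,
 * when the module parameter disables it for BSS, or when the BSSID is
 * on the driver's U-APSD no-aggregation list (mvm->uapsd_noagg_bssids).
 */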
2746 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2747 const u8 *bssid)
2748 {
2749 int i;
2750
2751 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2752 struct iwl_mvm_tcm_mac *mdata;
2753
2754 mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id];
2755 ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
2756 mdata->opened_rx_ba_sessions = false;
2757 }
2758
2759 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2760 return;
2761
2762 if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
2763 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2764 return;
2765 }
2766
2767 if (!vif->p2p &&
2768 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
2769 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2770 return;
2771 }
2772
2773 for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) {
2774 if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) {
2775 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2776 return;
2777 }
2778 }
2779
2780 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2781 }
2782
2783 static void
2784 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
2785 struct ieee80211_vif *vif, u8 *peer_addr,
2786 enum nl80211_tdls_operation action)
2787 {
2788 struct iwl_fw_dbg_trigger_tlv *trig;
2789 struct iwl_fw_dbg_trigger_tdls *tdls_trig;
2790
2791 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
2792 return;
2793
2794 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
2795 tdls_trig = (void *)trig->data;
2796 if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
2797 ieee80211_vif_to_wdev(vif), trig))
2798 return;
2799
2800 if (!(tdls_trig->action_bitmap & BIT(action)))
2801 return;
2802
2803 if (tdls_trig->peer_mode &&
2804 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
2805 return;
2806
2807 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
2808 "TDLS event occurred, peer %pM, action %d",
2809 peer_addr, action);
2810 }
2811
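/*
 * Drop any frames still sitting on the station's per-TID deferred TX
 * queues and restart the mac80211 queues that the first deferred frame
 * stopped, so nothing leaks when the station is removed.
 */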
2812 static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
2813 struct iwl_mvm_sta *mvm_sta)
2814 {
2815 struct iwl_mvm_tid_data *tid_data;
2816 struct sk_buff *skb;
2817 int i;
2818
2819 spin_lock_bh(&mvm_sta->lock);
2820 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
2821 tid_data = &mvm_sta->tid_data[i];
2822
2823 while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
2824 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2825
2826 /*
2827 * The first deferred frame should've stopped the MAC
2828 * queues, so we should never get a second deferred
2829 * frame for the RA/TID.
2830 */
2831 iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
2832 ieee80211_free_txskb(mvm->hw, skb);
2833 }
2834 }
2835 spin_unlock_bh(&mvm_sta->lock);
2836 }
2837
2838 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2839 struct ieee80211_vif *vif,
2840 struct ieee80211_sta *sta,
2841 enum ieee80211_sta_state old_state,
2842 enum ieee80211_sta_state new_state)
2843 {
2844 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2845 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2846 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2847 int ret;
2848
2849 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2850 sta->addr, old_state, new_state);
2851
2852 /* this would be a mac80211 bug ... but don't crash */
2853 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2854 return -EINVAL;
2855
2856 /*
2857 * If we are in a STA removal flow and in DQA mode:
2858 *
2859 * This is after the sync_rcu part, so the queues have already been
2860 * flushed. No more TXs on their way in mac80211's path, and no more in
2861 * the queues.
2862 * Also, we won't be getting any new TX frames for this station.
2863 * What we might have are deferred TX frames that need to be taken care
2864 * of.
2865 *
2866 * Drop any still-queued deferred-frame before removing the STA, and
2867 * make sure the worker is no longer handling frames for this STA.
2868 */
2869 if (old_state == IEEE80211_STA_NONE &&
2870 new_state == IEEE80211_STA_NOTEXIST) {
2871 iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
2872 flush_work(&mvm->add_stream_wk);
2873
2874 /*
2875 * No need to make sure deferred TX indication is off since the
2876 * worker will already remove it if it was on
2877 */
2878 }
2879
2880 mutex_lock(&mvm->mutex);
2881 /* track whether or not the station is associated */
2882 mvm_sta->sta_state = new_state;
2883
2884 if (old_state == IEEE80211_STA_NOTEXIST &&
2885 new_state == IEEE80211_STA_NONE) {
2886 /*
2887 * Firmware bug - it'll crash if the beacon interval is less
2888 * than 16. We can't avoid connecting at all, so refuse the
2889 * station state change, this will cause mac80211 to abandon
2890 * attempts to connect to this AP, and eventually wpa_s will
2891 * blacklist the AP...
2892 */
2893 if (vif->type == NL80211_IFTYPE_STATION &&
2894 vif->bss_conf.beacon_int < 16) {
2895 IWL_ERR(mvm,
2896 "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2897 sta->addr, vif->bss_conf.beacon_int);
2898 ret = -EINVAL;
2899 goto out_unlock;
2900 }
2901
2902 if (sta->tdls &&
2903 (vif->p2p ||
2904 iwl_mvm_tdls_sta_count(mvm, NULL) ==
2905 IWL_MVM_TDLS_STA_COUNT ||
2906 iwl_mvm_phy_ctx_count(mvm) > 1)) {
2907 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2908 ret = -EBUSY;
2909 goto out_unlock;
2910 }
2911
2912 ret = iwl_mvm_add_sta(mvm, vif, sta);
2913 if (sta->tdls && ret == 0) {
2914 iwl_mvm_recalc_tdls_state(mvm, vif, true);
2915 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2916 NL80211_TDLS_SETUP);
2917 }
2918 } else if (old_state == IEEE80211_STA_NONE &&
2919 new_state == IEEE80211_STA_AUTH) {
2920 /*
2921 * EBS may be disabled due to previous failures reported by FW.
2922 * Reset EBS status here assuming environment has been changed.
2923 */
2924 mvm->last_ebs_successful = true;
2925 iwl_mvm_check_uapsd(mvm, vif, sta->addr);
2926 ret = 0;
2927 } else if (old_state == IEEE80211_STA_AUTH &&
2928 new_state == IEEE80211_STA_ASSOC) {
2929 if (vif->type == NL80211_IFTYPE_AP) {
2930 mvmvif->ap_assoc_sta_count++;
2931 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2932 }
2933
2934 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
2935 ret = iwl_mvm_update_sta(mvm, vif, sta);
2936 } else if (old_state == IEEE80211_STA_ASSOC &&
2937 new_state == IEEE80211_STA_AUTHORIZED) {
2938
2939 /* we don't support TDLS during DCM */
2940 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2941 iwl_mvm_teardown_tdls_peers(mvm);
2942
2943 if (sta->tdls)
2944 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2945 NL80211_TDLS_ENABLE_LINK);
2946
2947 /* enable beacon filtering */
2948 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2949
2950 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
2951
2952 ret = 0;
2953 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2954 new_state == IEEE80211_STA_ASSOC) {
2955 /* disable beacon filtering */
2956 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
2957 ret = 0;
2958 } else if (old_state == IEEE80211_STA_ASSOC &&
2959 new_state == IEEE80211_STA_AUTH) {
2960 if (vif->type == NL80211_IFTYPE_AP) {
2961 mvmvif->ap_assoc_sta_count--;
2962 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2963 }
2964 ret = 0;
2965 } else if (old_state == IEEE80211_STA_AUTH &&
2966 new_state == IEEE80211_STA_NONE) {
2967 ret = 0;
2968 } else if (old_state == IEEE80211_STA_NONE &&
2969 new_state == IEEE80211_STA_NOTEXIST) {
2970 ret = iwl_mvm_rm_sta(mvm, vif, sta);
2971 if (sta->tdls) {
2972 iwl_mvm_recalc_tdls_state(mvm, vif, false);
2973 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2974 NL80211_TDLS_DISABLE_LINK);
2975 }
2976 } else {
2977 ret = -EIO;
2978 }
2979 out_unlock:
2980 mutex_unlock(&mvm->mutex);
2981
2982 if (sta->tdls && ret == 0) {
2983 if (old_state == IEEE80211_STA_NOTEXIST &&
2984 new_state == IEEE80211_STA_NONE)
2985 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2986 else if (old_state == IEEE80211_STA_NONE &&
2987 new_state == IEEE80211_STA_NOTEXIST)
2988 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2989 }
2990
2991 return ret;
2992 }
2993
2994 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2995 {
2996 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2997
2998 mvm->rts_threshold = value;
2999
3000 return 0;
3001 }
3002
3003 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
3004 struct ieee80211_vif *vif,
3005 struct ieee80211_sta *sta, u32 changed)
3006 {
3007 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3008
3009 if (vif->type == NL80211_IFTYPE_STATION &&
3010 changed & IEEE80211_RC_NSS_CHANGED)
3011 iwl_mvm_sf_update(mvm, vif, false);
3012 }
3013
3014 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
3015 struct ieee80211_vif *vif, u16 ac,
3016 const struct ieee80211_tx_queue_params *params)
3017 {
3018 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3019 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3020
3021 mvmvif->queue_params[ac] = *params;
3022
3023 /*
3024 * No need to update right away, we'll get BSS_CHANGED_QOS
3025 * The exception is P2P_DEVICE interface which needs immediate update.
3026 */
3027 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
3028 int ret;
3029
3030 mutex_lock(&mvm->mutex);
3031 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3032 mutex_unlock(&mvm->mutex);
3033 return ret;
3034 }
3035 return 0;
3036 }
3037
3038 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
3039 struct ieee80211_vif *vif,
3040 u16 req_duration)
3041 {
3042 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3043 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3044 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
3045
3046 /*
3047 * iwl_mvm_protect_session() reads directly from the device
3048 * (the system time), so make sure it is available.
3049 */
3050 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
3051 return;
3052
3053 if (req_duration > duration)
3054 duration = req_duration;
3055
3056 mutex_lock(&mvm->mutex);
3057 /* Try really hard to protect the session and hear a beacon */
3058 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
3059 mutex_unlock(&mvm->mutex);
3060
3061 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
3062 }
3063
3064 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
3065 struct ieee80211_vif *vif,
3066 struct cfg80211_sched_scan_request *req,
3067 struct ieee80211_scan_ies *ies)
3068 {
3069 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3070
3071 int ret;
3072
3073 mutex_lock(&mvm->mutex);
3074
3075 if (!vif->bss_conf.idle) {
3076 ret = -EBUSY;
3077 goto out;
3078 }
3079
3080 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
3081
3082 out:
3083 mutex_unlock(&mvm->mutex);
3084 return ret;
3085 }
3086
3087 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
3088 struct ieee80211_vif *vif)
3089 {
3090 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3091 int ret;
3092
3093 mutex_lock(&mvm->mutex);
3094
3095 /* Due to a race condition, it's possible that mac80211 asks
3096 * us to stop a sched_scan when it's already stopped. This
3097 * can happen, for instance, if we stopped the scan ourselves,
3098 * called ieee80211_sched_scan_stopped() and the userspace called
3099 * stop sched scan before ieee80211_sched_scan_stopped_work()
3100 * could run. To handle this, simply return if the scan is
3101 * not running.
3102 */
3103 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
3104 mutex_unlock(&mvm->mutex);
3105 return 0;
3106 }
3107
3108 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
3109 mutex_unlock(&mvm->mutex);
3110 iwl_mvm_wait_for_async_handlers(mvm);
3111
3112 return ret;
3113 }
3114
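/*
 * mac80211 set_key() callback: program pairwise/group keys into the
 * firmware. Depending on the cipher this sets IV-space/MMIC flags,
 * allocates per-queue PN tracking on devices with the new RX API,
 * reuses the key offset across firmware restarts and falls back to
 * TX-only handling where RX offload isn't possible.
 */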
3115 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3116 enum set_key_cmd cmd,
3117 struct ieee80211_vif *vif,
3118 struct ieee80211_sta *sta,
3119 struct ieee80211_key_conf *key)
3120 {
3121 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3122 struct iwl_mvm_sta *mvmsta;
3123 struct iwl_mvm_key_pn *ptk_pn;
3124 int keyidx = key->keyidx;
3125 int ret;
3126 u8 key_offset;
3127
3128 if (iwlwifi_mod_params.swcrypto) {
3129 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
3130 return -EOPNOTSUPP;
3131 }
3132
3133 switch (key->cipher) {
3134 case WLAN_CIPHER_SUITE_TKIP:
3135 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
3136 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3137 break;
3138 case WLAN_CIPHER_SUITE_CCMP:
3139 case WLAN_CIPHER_SUITE_GCMP:
3140 case WLAN_CIPHER_SUITE_GCMP_256:
3141 if (!iwl_mvm_has_new_tx_api(mvm))
3142 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3143 break;
3144 case WLAN_CIPHER_SUITE_AES_CMAC:
3145 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3146 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3147 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
3148 break;
3149 case WLAN_CIPHER_SUITE_WEP40:
3150 case WLAN_CIPHER_SUITE_WEP104:
3151 /* For non-client mode, only use WEP keys for TX as we probably
3152 * don't have a station yet anyway and would then have to keep
3153 * track of the keys, linking them to each of the clients/peers
3154 * as they appear. For now, don't do that, for performance WEP
3155 * offload doesn't really matter much, but we need it for some
3156 * other offload features in client mode.
3157 */
3158 if (vif->type != NL80211_IFTYPE_STATION)
3159 return 0;
3160 break;
3161 default:
3162 /* currently FW supports only one optional cipher scheme */
3163 if (hw->n_cipher_schemes &&
3164 hw->cipher_schemes->cipher == key->cipher)
3165 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3166 else
3167 return -EOPNOTSUPP;
3168 }
3169
3170 mutex_lock(&mvm->mutex);
3171
3172 switch (cmd) {
3173 case SET_KEY:
3174 if ((vif->type == NL80211_IFTYPE_ADHOC ||
3175 vif->type == NL80211_IFTYPE_AP) && !sta) {
3176 /*
3177 * GTK on AP interface is a TX-only key, return 0;
3178 * on IBSS they're per-station and because we're lazy
3179 * we don't support them for RX, so do the same.
3180 * CMAC/GMAC in AP/IBSS modes must be done in software.
3181 */
3182 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3183 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3184 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3185 ret = -EOPNOTSUPP;
3186 else
3187 ret = 0;
3188
3189 if (key->cipher != WLAN_CIPHER_SUITE_GCMP &&
3190 key->cipher != WLAN_CIPHER_SUITE_GCMP_256 &&
3191 !iwl_mvm_has_new_tx_api(mvm)) {
3192 key->hw_key_idx = STA_KEY_IDX_INVALID;
3193 break;
3194 }
3195 }
3196
3197 /* During FW restart, in order to restore the state as it was,
3198 * don't try to reprogram keys we previously failed for.
3199 */
3200 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
3201 key->hw_key_idx == STA_KEY_IDX_INVALID) {
3202 IWL_DEBUG_MAC80211(mvm,
3203 "skip invalid idx key programming during restart\n");
3204 ret = 0;
3205 break;
3206 }
3207
3208 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
3209 sta && iwl_mvm_has_new_rx_api(mvm) &&
3210 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
3211 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
3212 key->cipher == WLAN_CIPHER_SUITE_GCMP ||
3213 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
3214 struct ieee80211_key_seq seq;
3215 int tid, q;
3216
3217 mvmsta = iwl_mvm_sta_from_mac80211(sta);
3218 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
3219 ptk_pn = kzalloc(struct_size(ptk_pn, q,
3220 mvm->trans->num_rx_queues),
3221 GFP_KERNEL);
3222 if (!ptk_pn) {
3223 ret = -ENOMEM;
3224 break;
3225 }
3226
3227 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
3228 ieee80211_get_key_rx_seq(key, tid, &seq);
3229 for (q = 0; q < mvm->trans->num_rx_queues; q++)
3230 memcpy(ptk_pn->q[q].pn[tid],
3231 seq.ccmp.pn,
3232 IEEE80211_CCMP_PN_LEN);
3233 }
3234
3235 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn);
3236 }
3237
3238 /* in HW restart reuse the index, otherwise request a new one */
3239 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
3240 key_offset = key->hw_key_idx;
3241 else
3242 key_offset = STA_KEY_IDX_INVALID;
3243
3244 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
3245 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
3246 if (ret) {
3247 IWL_WARN(mvm, "set key failed\n");
3248 /*
3249 * can't add key for RX, but we don't need it
3250 * in the device for TX so still return 0
3251 */
3252 key->hw_key_idx = STA_KEY_IDX_INVALID;
3253 ret = 0;
3254 }
3255
3256 break;
3257 case DISABLE_KEY:
3258 if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
3259 ret = 0;
3260 break;
3261 }
3262
3263 if (sta && iwl_mvm_has_new_rx_api(mvm) &&
3264 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
3265 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
3266 key->cipher == WLAN_CIPHER_SUITE_GCMP ||
3267 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
3268 mvmsta = iwl_mvm_sta_from_mac80211(sta);
3269 ptk_pn = rcu_dereference_protected(
3270 mvmsta->ptk_pn[keyidx],
3271 lockdep_is_held(&mvm->mutex));
3272 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
3273 if (ptk_pn)
3274 kfree_rcu(ptk_pn, rcu_head);
3275 }
3276
3277 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
3278 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
3279 break;
3280 default:
3281 ret = -EINVAL;
3282 }
3283
3284 mutex_unlock(&mvm->mutex);
3285 return ret;
3286 }
3287
3288 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
3289 struct ieee80211_vif *vif,
3290 struct ieee80211_key_conf *keyconf,
3291 struct ieee80211_sta *sta,
3292 u32 iv32, u16 *phase1key)
3293 {
3294 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3295
3296 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
3297 return;
3298
3299 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
3300 }
3301
3302
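/*
 * Notification-wait handler for the HOT_SPOT_CMD response: record the
 * time event's unique id in the te_data passed through @data and link
 * it onto mvm->aux_roc_te_list so the started/ended notifications can
 * find it later.
 */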
3303 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
3304 struct iwl_rx_packet *pkt, void *data)
3305 {
3306 struct iwl_mvm *mvm =
3307 container_of(notif_wait, struct iwl_mvm, notif_wait);
3308 struct iwl_hs20_roc_res *resp;
3309 int resp_len = iwl_rx_packet_payload_len(pkt);
3310 struct iwl_mvm_time_event_data *te_data = data;
3311
3312 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
3313 return true;
3314
3315 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
3316 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
3317 return true;
3318 }
3319
3320 resp = (void *)pkt->data;
3321
3322 IWL_DEBUG_TE(mvm,
3323 "Aux ROC: Received response from ucode: status=%d uid=%d\n",
3324 resp->status, resp->event_unique_id);
3325
3326 te_data->uid = le32_to_cpu(resp->event_unique_id);
3327 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
3328 te_data->uid);
3329
3330 spin_lock_bh(&mvm->time_event_lock);
3331 list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
3332 spin_unlock_bh(&mvm->time_event_lock);
3333
3334 return true;
3335 }
3336
3337 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
3338 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
3339 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
3340 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
3341 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
3342 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
3343 struct ieee80211_channel *channel,
3344 struct ieee80211_vif *vif,
3345 int duration)
3346 {
3347 int res, time_reg = DEVICE_SYSTEM_TIME_REG;
3348 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3349 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
3350 static const u16 time_event_response[] = { HOT_SPOT_CMD };
3351 struct iwl_notification_wait wait_time_event;
3352 u32 dtim_interval = vif->bss_conf.dtim_period *
3353 vif->bss_conf.beacon_int;
3354 u32 req_dur, delay;
3355 struct iwl_hs20_roc_req aux_roc_req = {
3356 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
3357 .id_and_color =
3358 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
3359 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
3360 /* Set the channel info data */
3361 .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
3362 PHY_BAND_24 : PHY_BAND_5,
3363 .channel_info.channel = channel->hw_value,
3364 .channel_info.width = PHY_VHT_CHANNEL_MODE20,
3365 /* Set the time and duration */
3366 .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
3367 };
3368
3369 delay = AUX_ROC_MIN_DELAY;
3370 req_dur = MSEC_TO_TU(duration);
3371
3372 /*
3373 * If we are associated we want the delay time to be at least one
3374 * dtim interval so that the FW can wait until after the DTIM and
3375 * then start the time event, this will potentially allow us to
3376 * remain off-channel for the max duration.
3377 * Since we want to use almost a whole dtim interval we would also
3378 * like the delay to be for 2-3 dtim intervals, in case there are
3379 * other time events with higher priority.
3380 */
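/*
 * For example, with dtim_period 2 and a 100 TU beacon interval,
 * dtim_interval is 200 TU: the delay becomes
 * min(3 * 200 TU, AUX_ROC_MAX_DELAY), and a requested duration of
 * 200 TU or more is trimmed to 200 - AUX_ROC_SAFETY_BUFFER TU
 * (or to 200 - AUX_ROC_MIN_SAFETY_BUFFER TU if that leaves no more
 * than AUX_ROC_MIN_DURATION).
 */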
	if (vif->bss_conf.assoc) {
		delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
		/* We cannot remain off-channel longer than the DTIM interval */
		if (dtim_interval <= req_dur) {
			req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER;
			if (req_dur <= AUX_ROC_MIN_DURATION)
				req_dur = dtim_interval -
					AUX_ROC_MIN_SAFETY_BUFFER;
		}
	}

	aux_roc_req.duration = cpu_to_le32(req_dur);
	aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);

	IWL_DEBUG_TE(mvm,
		     "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
		     channel->hw_value, req_dur, duration, delay,
		     dtim_interval);
	/* Set the node address */
	memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);

	if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}

	te_data->vif = vif;
	te_data->duration = duration;
	te_data->id = HOT_SPOT_CMD;

	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_rx_aux_roc, te_data);

	res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
				   &aux_roc_req);

	if (res) {
		IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(res);

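	/*
	 * Note: the out_clear_te label lives inside this error branch so
	 * that the command-send failure above shares the same cleanup.
	 */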
	if (res) {
out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}

	return res;
}

static int iwl_mvm_roc(struct ieee80211_hw *hw,
		       struct ieee80211_vif *vif,
		       struct ieee80211_channel *channel,
		       int duration,
		       enum ieee80211_roc_type type)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct cfg80211_chan_def chandef;
	struct iwl_mvm_phy_ctxt *phy_ctxt;
	int ret, i;

	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
			   duration, type);

	/*
	 * Flush the done work, just in case it's still pending, so that
	 * the work it does can complete and we can accept new frames.
	 */
	flush_work(&mvm->roc_done_wk);

	mutex_lock(&mvm->mutex);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (fw_has_capa(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
			/* Use aux roc framework (HS20) */
			ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
						       vif, duration);
			goto out_unlock;
		}
		IWL_ERR(mvm, "hotspot not supported\n");
		ret = -EINVAL;
		goto out_unlock;
	case NL80211_IFTYPE_P2P_DEVICE:
		/* handle below */
		break;
	default:
		IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
		ret = -EINVAL;
		goto out_unlock;
	}

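	/*
	 * Look for another PHY context that is already tuned to the
	 * requested channel, so the P2P Device can simply be re-bound
	 * to it instead of reconfiguring a context.
	 */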
	for (i = 0; i < NUM_PHY_CTX; i++) {
		phy_ctxt = &mvm->phy_ctxts[i];
		if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
			continue;

		if (phy_ctxt->ref && channel == phy_ctxt->channel) {
			/*
			 * Unbind the P2P_DEVICE from the current PHY context,
			 * and if the PHY context is not used remove it.
			 */
			ret = iwl_mvm_binding_remove_vif(mvm, vif);
			if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
				goto out_unlock;

			iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);

			/* Bind the P2P_DEVICE to the current PHY Context */
			mvmvif->phy_ctxt = phy_ctxt;

			ret = iwl_mvm_binding_add_vif(mvm, vif);
			if (WARN(ret, "Failed binding P2P_DEVICE\n"))
				goto out_unlock;

			iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
			goto schedule_time_event;
		}
	}

	/* Need to update the PHY context only if the ROC channel changed */
	if (channel == mvmvif->phy_ctxt->channel)
		goto schedule_time_event;

	cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);

	/*
	 * Change the PHY context configuration as it is currently referenced
	 * only by the P2P Device MAC
	 */
	if (mvmvif->phy_ctxt->ref == 1) {
		ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
					       &chandef, 1, 1);
		if (ret)
			goto out_unlock;
	} else {
		/*
		 * The PHY context is shared with other MACs. Need to remove
		 * the P2P Device from the binding, allocate a new PHY context
		 * and create a new binding.
		 */
		phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
		if (!phy_ctxt) {
			ret = -ENOSPC;
			goto out_unlock;
		}

		ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
					       1, 1);
		if (ret) {
			IWL_ERR(mvm, "Failed to change PHY context\n");
			goto out_unlock;
		}

		/* Unbind the P2P_DEVICE from the current PHY context */
		ret = iwl_mvm_binding_remove_vif(mvm, vif);
		if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
			goto out_unlock;

		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);

		/* Bind the P2P_DEVICE to the new allocated PHY context */
		mvmvif->phy_ctxt = phy_ctxt;

		ret = iwl_mvm_binding_add_vif(mvm, vif);
		if (WARN(ret, "Failed binding P2P_DEVICE\n"))
			goto out_unlock;

		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
	}

schedule_time_event:
	/* Schedule the time events */
	ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);

out_unlock:
	mutex_unlock(&mvm->mutex);
	IWL_DEBUG_MAC80211(mvm, "leave\n");
	return ret;
}

static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	IWL_DEBUG_MAC80211(mvm, "enter\n");

	mutex_lock(&mvm->mutex);
	iwl_mvm_stop_roc(mvm);
	mutex_unlock(&mvm->mutex);

	IWL_DEBUG_MAC80211(mvm, "leave\n");
	return 0;
}

static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
				 struct ieee80211_chanctx_conf *ctx)
{
	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
	struct iwl_mvm_phy_ctxt *phy_ctxt;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_MAC80211(mvm, "Add channel context\n");

	phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
	if (!phy_ctxt) {
		ret = -ENOSPC;
		goto out;
	}

	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
				       ctx->rx_chains_static,
				       ctx->rx_chains_dynamic);
	if (ret) {
		IWL_ERR(mvm, "Failed to add PHY context\n");
		goto out;
	}

	iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
	*phy_ctxt_id = phy_ctxt->id;
out:
	return ret;
}

static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
			       struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	mutex_lock(&mvm->mutex);
	ret = __iwl_mvm_add_chanctx(mvm, ctx);
	mutex_unlock(&mvm->mutex);

	return ret;
}

static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
				     struct ieee80211_chanctx_conf *ctx)
{
	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
}

static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
				   struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mutex_lock(&mvm->mutex);
	__iwl_mvm_remove_chanctx(mvm, ctx);
	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
				   struct ieee80211_chanctx_conf *ctx,
				   u32 changed)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];

	if (WARN_ONCE((phy_ctxt->ref > 1) &&
		      (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
				   IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
				   IEEE80211_CHANCTX_CHANGE_RADAR |
				   IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
		      "Cannot change PHY. Ref=%d, changed=0x%X\n",
		      phy_ctxt->ref, changed))
		return;

	mutex_lock(&mvm->mutex);

	/* we are only changing the min_width, may be a noop */
	if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) {
		if (phy_ctxt->width == ctx->min_def.width)
			goto out_unlock;

		/* we are just toggling between 20_NOHT and 20 */
		if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 &&
		    ctx->min_def.width <= NL80211_CHAN_WIDTH_20)
			goto out_unlock;
	}

	iwl_mvm_bt_coex_vif_change(mvm);
	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
				 ctx->rx_chains_static,
				 ctx->rx_chains_dynamic);

out_unlock:
	mutex_unlock(&mvm->mutex);
}

static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct ieee80211_chanctx_conf *ctx,
					bool switching_chanctx)
{
	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	mvmvif->phy_ctxt = phy_ctxt;

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		/* only needed if we're switching chanctx (i.e. during CSA) */
		if (switching_chanctx) {
			mvmvif->ap_ibss_active = true;
			break;
		}
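		/* fall through */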
	case NL80211_IFTYPE_ADHOC:
		/*
		 * The AP binding flow is handled as part of the start_ap flow
		 * (in bss_info_changed), similarly for IBSS.
		 */
		ret = 0;
		goto out;
	case NL80211_IFTYPE_STATION:
		mvmvif->csa_bcn_pending = false;
		break;
	case NL80211_IFTYPE_MONITOR:
		/* always disable PS when a monitor interface is active */
		mvmvif->ps_disabled = true;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	ret = iwl_mvm_binding_add_vif(mvm, vif);
	if (ret)
		goto out;

	/*
	 * Power state must be updated before quotas,
	 * otherwise fw will complain.
	 */
	iwl_mvm_power_update_mac(mvm);

	/* Setting the quota at this stage is only required for monitor
	 * interfaces. For the other types, the bss_info changed flow
	 * will handle quota settings.
	 */
	if (vif->type == NL80211_IFTYPE_MONITOR) {
		mvmvif->monitor_active = true;
		ret = iwl_mvm_update_quotas(mvm, false, NULL);
		if (ret)
			goto out_remove_binding;

		ret = iwl_mvm_add_snif_sta(mvm, vif);
		if (ret)
			goto out_remove_binding;

	}

	/* Handle binding during CSA */
	if (vif->type == NL80211_IFTYPE_AP) {
		iwl_mvm_update_quotas(mvm, false, NULL);
		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
	}

	if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
		u32 duration = 3 * vif->bss_conf.beacon_int;

		/* iwl_mvm_protect_session() reads directly from the
		 * device (the system time), so make sure it is
		 * available.
		 */
		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
		if (ret)
			goto out_remove_binding;

		/* Protect the session to make sure we hear the first
		 * beacon on the new channel.
		 */
		mvmvif->csa_bcn_pending = true;
		iwl_mvm_protect_session(mvm, vif, duration, duration,
					vif->bss_conf.beacon_int / 2,
					true);

		iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);

		iwl_mvm_update_quotas(mvm, false, NULL);
	}

	goto out;

out_remove_binding:
	iwl_mvm_binding_remove_vif(mvm, vif);
	iwl_mvm_power_update_mac(mvm);
out:
	if (ret)
		mvmvif->phy_ctxt = NULL;
	return ret;
}
static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	mutex_lock(&mvm->mutex);
	ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
	mutex_unlock(&mvm->mutex);

	return ret;
}

static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
					   struct ieee80211_vif *vif,
					   struct ieee80211_chanctx_conf *ctx,
					   bool switching_chanctx)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_vif *disabled_vif = NULL;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);

	switch (vif->type) {
	case NL80211_IFTYPE_ADHOC:
		goto out;
	case NL80211_IFTYPE_MONITOR:
		mvmvif->monitor_active = false;
		mvmvif->ps_disabled = false;
		iwl_mvm_rm_snif_sta(mvm, vif);
		break;
	case NL80211_IFTYPE_AP:
		/* This part is triggered only during CSA */
		if (!switching_chanctx || !mvmvif->ap_ibss_active)
			goto out;

		mvmvif->csa_countdown = false;

		/* Set CS bit on all the stations */
		iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);

		/* Save blocked iface, the timeout is set on the next beacon */
		rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);

		mvmvif->ap_ibss_active = false;
		break;
	case NL80211_IFTYPE_STATION:
		if (!switching_chanctx)
			break;

		disabled_vif = vif;

		iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
		break;
	default:
		break;
	}

	iwl_mvm_update_quotas(mvm, false, disabled_vif);
	iwl_mvm_binding_remove_vif(mvm, vif);

out:
	mvmvif->phy_ctxt = NULL;
	iwl_mvm_power_update_mac(mvm);
}

static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mutex_lock(&mvm->mutex);
	__iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
	mutex_unlock(&mvm->mutex);
}

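/*
 * Swap-mode channel switch: tear the vif off the old context, remove that
 * context, then add and assign the new one.  On failure, try to restore
 * the old context and binding; if even that fails, restart the NIC.
 */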
static int
iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
				struct ieee80211_vif_chanctx_switch *vifs)
{
	int ret;

	mutex_lock(&mvm->mutex);
	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
	__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);

	ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
	if (ret) {
		IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
		goto out_reassign;
	}

	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
					   true);
	if (ret) {
		IWL_ERR(mvm,
			"failed to assign new_ctx during channel switch\n");
		goto out_remove;
	}

	/* we don't support TDLS during DCM - can be caused by channel switch */
	if (iwl_mvm_phy_ctx_count(mvm) > 1)
		iwl_mvm_teardown_tdls_peers(mvm);

	goto out;

out_remove:
	__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);

out_reassign:
	if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
		IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
		goto out_restart;
	}

	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
					 true)) {
		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
		goto out_restart;
	}

	goto out;

out_restart:
	/* things keep failing, better restart the hw */
	iwl_mvm_nic_restart(mvm, false);

out:
	mutex_unlock(&mvm->mutex);

	return ret;
}

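/*
 * Reassign-mode channel switch: both contexts stay allocated; only the
 * vif's binding is moved from the old context to the new one, with a
 * fallback to the old binding (or a NIC restart) on failure.
 */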
static int
iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
				    struct ieee80211_vif_chanctx_switch *vifs)
{
	int ret;

	mutex_lock(&mvm->mutex);
	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);

	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
					   true);
	if (ret) {
		IWL_ERR(mvm,
			"failed to assign new_ctx during channel switch\n");
		goto out_reassign;
	}

	goto out;

out_reassign:
	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
					 true)) {
		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
		goto out_restart;
	}

	goto out;

out_restart:
	/* things keep failing, better restart the hw */
	iwl_mvm_nic_restart(mvm, false);

out:
	mutex_unlock(&mvm->mutex);

	return ret;
}

static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
				      struct ieee80211_vif_chanctx_switch *vifs,
				      int n_vifs,
				      enum ieee80211_chanctx_switch_mode mode)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	/* we only support a single vif right now */
	if (n_vifs > 1)
		return -EOPNOTSUPP;

	switch (mode) {
	case CHANCTX_SWMODE_SWAP_CONTEXTS:
		ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
		break;
	case CHANCTX_SWMODE_REASSIGN_VIF:
		ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	return mvm->ibss_manager;
}

static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
			   struct ieee80211_sta *sta,
			   bool set)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	if (!mvm_sta || !mvm_sta->vif) {
		IWL_ERR(mvm, "Station is not associated to a vif\n");
		return -EINVAL;
	}

	return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
}

#ifdef CONFIG_NL80211_TESTMODE
static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
	[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
	[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
	[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
};

static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
				      struct ieee80211_vif *vif,
				      void *data, int len)
{
	struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
	int err;
	u32 noa_duration;

	err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy,
			NULL);
	if (err)
		return err;

	if (!tb[IWL_MVM_TM_ATTR_CMD])
		return -EINVAL;

	switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
	case IWL_MVM_TM_CMD_SET_NOA:
		if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
		    !vif->bss_conf.enable_beacon ||
		    !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
			return -EINVAL;

		noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
		if (noa_duration >= vif->bss_conf.beacon_int)
			return -EINVAL;

		mvm->noa_duration = noa_duration;
		mvm->noa_vif = vif;

		return iwl_mvm_update_quotas(mvm, true, NULL);
	case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
		/* must be associated client vif - ignore authorized */
		if (!vif || vif->type != NL80211_IFTYPE_STATION ||
		    !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
		    !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
			return -EINVAL;

		if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
			return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
		return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
	}

	return -EOPNOTSUPP;
}

static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    void *data, int len)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int err;

	mutex_lock(&mvm->mutex);
	err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
	mutex_unlock(&mvm->mutex);

	return err;
}
#endif

static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_channel_switch *chsw)
{
	/* By implementing this operation, we prevent mac80211 from
	 * starting its own channel switch timer, so that we can call
	 * ieee80211_chswitch_done() ourselves at the right time
	 * (which is when the absence time event starts).
	 */

	IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
			   "dummy channel switch op\n");
}

static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct ieee80211_channel_switch *chsw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_vif *csa_vif;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 apply_time;
	int ret;

	mutex_lock(&mvm->mutex);

	mvmvif->csa_failed = false;

	IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
			   chsw->chandef.center_freq1);

	iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt,
				       ieee80211_vif_to_wdev(vif),
				       FW_DBG_TRIGGER_CHANNEL_SWITCH);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		csa_vif =
			rcu_dereference_protected(mvm->csa_vif,
						  lockdep_is_held(&mvm->mutex));
		if (WARN_ONCE(csa_vif && csa_vif->csa_active,
			      "Another CSA is already in progress")) {
			ret = -EBUSY;
			goto out_unlock;
		}

		/* we still didn't unblock tx. prevent new CS meanwhile */
		if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
					      lockdep_is_held(&mvm->mutex))) {
			ret = -EBUSY;
			goto out_unlock;
		}

		rcu_assign_pointer(mvm->csa_vif, vif);

		if (WARN_ONCE(mvmvif->csa_countdown,
			      "Previous CSA countdown didn't complete")) {
			ret = -EBUSY;
			goto out_unlock;
		}

		mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;

		break;
	case NL80211_IFTYPE_STATION:
		/* Schedule the time event to a bit before beacon 1,
		 * to make sure we're in the new channel when the
		 * GO/AP arrives. In case count <= 1 immediately schedule the
		 * TE (this might result in some packet loss or connection
		 * loss).
		 */
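		/*
		 * beacon_int is in TUs while the device timestamp counts
		 * usec, hence the *1024 below.
		 */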
		if (chsw->count <= 1)
			apply_time = 0;
		else
			apply_time = chsw->device_timestamp +
				((vif->bss_conf.beacon_int * (chsw->count - 1) -
				  IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);

		if (chsw->block_tx)
			iwl_mvm_csa_client_absent(mvm, vif);

		iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
					    apply_time);
		if (mvmvif->bf_data.bf_enabled) {
			ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
			if (ret)
				goto out_unlock;
		}

		break;
	default:
		break;
	}

	mvmvif->ps_disabled = true;

	ret = iwl_mvm_power_update_ps(mvm);
	if (ret)
		goto out_unlock;

	/* we won't be on this channel any longer */
	iwl_mvm_teardown_tdls_peers(mvm);

out_unlock:
	mutex_unlock(&mvm->mutex);

	return ret;
}

static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	mutex_lock(&mvm->mutex);

	if (mvmvif->csa_failed) {
		mvmvif->csa_failed = false;
		ret = -EIO;
		goto out_unlock;
	}

	if (vif->type == NL80211_IFTYPE_STATION) {
		struct iwl_mvm_sta *mvmsta;

		mvmvif->csa_bcn_pending = false;
		mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
							  mvmvif->ap_sta_id);

		if (WARN_ON(!mvmsta)) {
			ret = -EIO;
			goto out_unlock;
		}

		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);

		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);

		ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
		if (ret)
			goto out_unlock;

		iwl_mvm_stop_session_protection(mvm, vif);
	}

	mvmvif->ps_disabled = false;

	ret = iwl_mvm_power_update_ps(mvm);

out_unlock:
	mutex_unlock(&mvm->mutex);

	return ret;
}

static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
{
	int i;

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (drop) {
			mutex_lock(&mvm->mutex);
			iwl_mvm_flush_tx_path(mvm,
				iwl_mvm_flushable_queues(mvm) & queues, 0);
			mutex_unlock(&mvm->mutex);
		} else {
			iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
		}
		return;
	}

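	/*
	 * With the new TX API there is no queue-mask based flush, so walk
	 * the station table and flush or drain each station's queues.
	 */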
	mutex_lock(&mvm->mutex);
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		struct ieee80211_sta *sta;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		if (drop)
			iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0);
		else
			iwl_mvm_wait_sta_queues_empty(mvm,
					iwl_mvm_sta_from_mac80211(sta));
	}
	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif, u32 queues, bool drop)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_sta *sta;
	int i;
	u32 msk = 0;

	if (!vif) {
		iwl_mvm_flush_no_vif(mvm, queues, drop);
		return;
	}

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	/* Make sure we're done with the deferred traffic before flushing */
	flush_work(&mvm->add_stream_wk);

	mutex_lock(&mvm->mutex);
	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	/* flush the AP-station and all TDLS peers */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		if (mvmsta->vif != vif)
			continue;

		/* make sure only TDLS peers or the AP are flushed */
		WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);

		if (drop) {
			if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0))
				IWL_ERR(mvm, "flush request fail\n");
		} else {
			msk |= mvmsta->tfd_queue_msk;
			if (iwl_mvm_has_new_tx_api(mvm))
				iwl_mvm_wait_sta_queues_empty(mvm, mvmsta);
		}
	}

	mutex_unlock(&mvm->mutex);

	/* this can take a while, and we may need/want other operations
	 * to succeed while doing this, so do it without the mutex held
	 */
	if (!drop && !iwl_mvm_has_new_tx_api(mvm))
		iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
}

static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
				  struct survey_info *survey)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	memset(survey, 0, sizeof(*survey));

	/* only support global statistics right now */
	if (idx != 0)
		return -ENOENT;

	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
		return -ENOENT;

	mutex_lock(&mvm->mutex);

	if (iwl_mvm_firmware_running(mvm)) {
		ret = iwl_mvm_request_statistics(mvm, false);
		if (ret)
			goto out;
	}

	survey->filled = SURVEY_INFO_TIME |
			 SURVEY_INFO_TIME_RX |
			 SURVEY_INFO_TIME_TX |
			 SURVEY_INFO_TIME_SCAN;
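	/* the accumulated on-air times below are converted from usec to msec */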
	survey->time = mvm->accu_radio_stats.on_time_rf +
		       mvm->radio_stats.on_time_rf;
	do_div(survey->time, USEC_PER_MSEC);

	survey->time_rx = mvm->accu_radio_stats.rx_time +
			  mvm->radio_stats.rx_time;
	do_div(survey->time_rx, USEC_PER_MSEC);

	survey->time_tx = mvm->accu_radio_stats.tx_time +
			  mvm->radio_stats.tx_time;
	do_div(survey->time_tx, USEC_PER_MSEC);

	survey->time_scan = mvm->accu_radio_stats.on_time_scan +
			    mvm->radio_stats.on_time_scan;
	do_div(survey->time_scan, USEC_PER_MSEC);

	ret = 0;
out:
	mutex_unlock(&mvm->mutex);
	return ret;
}

static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta,
				       struct station_info *sinfo)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

	if (mvmsta->avg_energy) {
		sinfo->signal_avg = mvmsta->avg_energy;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
	}

	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
		return;

	/* if beacon filtering isn't on, mac80211 does it anyway */
	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
		return;

	if (!vif->bss_conf.assoc)
		return;

	mutex_lock(&mvm->mutex);

	if (mvmvif->ap_sta_id != mvmsta->sta_id)
		goto unlock;

	if (iwl_mvm_request_statistics(mvm, false))
		goto unlock;

	sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
			   mvmvif->beacon_stats.accu_num_beacons;
	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
	if (mvmvif->beacon_stats.avg_signal) {
		/* firmware only reports a value after RXing a few beacons */
		sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
	}
unlock:
	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					const struct ieee80211_event *event)
{
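	/*
	 * Collect firmware debug data for an MLME trigger either when its
	 * occurrence counter is unset (zero) or once it counts down to zero.
	 */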
#define CHECK_MLME_TRIGGER(_cnt, _fmt...)				\
	do {								\
		if ((trig_mlme->_cnt) && --(trig_mlme->_cnt))		\
			break;						\
		iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt);	\
	} while (0)

	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
	trig_mlme = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
					   ieee80211_vif_to_wdev(vif), trig))
		return;

	if (event->u.mlme.data == ASSOC_EVENT) {
		if (event->u.mlme.status == MLME_DENIED)
			CHECK_MLME_TRIGGER(stop_assoc_denied,
					   "DENIED ASSOC: reason %d",
					   event->u.mlme.reason);
		else if (event->u.mlme.status == MLME_TIMEOUT)
			CHECK_MLME_TRIGGER(stop_assoc_timeout,
					   "ASSOC TIMEOUT");
	} else if (event->u.mlme.data == AUTH_EVENT) {
		if (event->u.mlme.status == MLME_DENIED)
			CHECK_MLME_TRIGGER(stop_auth_denied,
					   "DENIED AUTH: reason %d",
					   event->u.mlme.reason);
		else if (event->u.mlme.status == MLME_TIMEOUT)
			CHECK_MLME_TRIGGER(stop_auth_timeout,
					   "AUTH TIMEOUT");
	} else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
		CHECK_MLME_TRIGGER(stop_rx_deauth,
				   "DEAUTH RX %d", event->u.mlme.reason);
	} else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
		CHECK_MLME_TRIGGER(stop_tx_deauth,
				   "DEAUTH TX %d", event->u.mlme.reason);
	}
#undef CHECK_MLME_TRIGGER
}

static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  const struct ieee80211_event *event)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
					   ieee80211_vif_to_wdev(vif), trig))
		return;

	if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
		return;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
				"BAR received from %pM, tid %d, ssn %d",
				event->u.ba.sta->addr, event->u.ba.tid,
				event->u.ba.ssn);
}

static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       const struct ieee80211_event *event)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	switch (event->type) {
	case MLME_EVENT:
		iwl_mvm_event_mlme_callback(mvm, vif, event);
		break;
	case BAR_RX_EVENT:
		iwl_mvm_event_bar_rx_callback(mvm, vif, event);
		break;
	case BA_FRAME_TIMEOUT:
		iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta,
						     event->u.ba.tid);
		break;
	default:
		break;
	}
}

void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
				     struct iwl_mvm_internal_rxq_notif *notif,
				     u32 size)
{
	u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_rx_api(mvm))
		return;

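	/*
	 * For a synchronous request, arm queue_sync_counter with the number
	 * of RX queues and wait (up to one second) for it to drop to zero
	 * as the queues process the notification, or for rfkill.
	 */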
	notif->cookie = mvm->queue_sync_cookie;

	if (notif->sync)
		atomic_set(&mvm->queue_sync_counter,
			   mvm->trans->num_rx_queues);

	ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
	if (ret) {
		IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
		goto out;
	}

	if (notif->sync) {
		ret = wait_event_timeout(mvm->rx_sync_waitq,
					 atomic_read(&mvm->queue_sync_counter) == 0 ||
					 iwl_mvm_is_radio_killed(mvm),
					 HZ);
		WARN_ON_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm));
	}

out:
	atomic_set(&mvm->queue_sync_counter, 0);
	mvm->queue_sync_cookie++;
}

static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_internal_rxq_notif data = {
		.type = IWL_MVM_RXQ_EMPTY,
		.sync = 1,
	};

	mutex_lock(&mvm->mutex);
	iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
	mutex_unlock(&mvm->mutex);
}

const struct ieee80211_ops iwl_mvm_hw_ops = {
	.tx = iwl_mvm_mac_tx,
	.ampdu_action = iwl_mvm_mac_ampdu_action,
	.start = iwl_mvm_mac_start,
	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
	.stop = iwl_mvm_mac_stop,
	.add_interface = iwl_mvm_mac_add_interface,
	.remove_interface = iwl_mvm_mac_remove_interface,
	.config = iwl_mvm_mac_config,
	.prepare_multicast = iwl_mvm_prepare_multicast,
	.configure_filter = iwl_mvm_configure_filter,
	.config_iface_filter = iwl_mvm_config_iface_filter,
	.bss_info_changed = iwl_mvm_bss_info_changed,
	.hw_scan = iwl_mvm_mac_hw_scan,
	.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
	.sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
	.sta_state = iwl_mvm_mac_sta_state,
	.sta_notify = iwl_mvm_mac_sta_notify,
	.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
	.release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
	.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
	.sta_rc_update = iwl_mvm_sta_rc_update,
	.conf_tx = iwl_mvm_mac_conf_tx,
	.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
	.mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
	.flush = iwl_mvm_mac_flush,
	.sched_scan_start = iwl_mvm_mac_sched_scan_start,
	.sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
	.set_key = iwl_mvm_mac_set_key,
	.update_tkip_key = iwl_mvm_mac_update_tkip_key,
	.remain_on_channel = iwl_mvm_roc,
	.cancel_remain_on_channel = iwl_mvm_cancel_roc,
	.add_chanctx = iwl_mvm_add_chanctx,
	.remove_chanctx = iwl_mvm_remove_chanctx,
	.change_chanctx = iwl_mvm_change_chanctx,
	.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
	.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
	.switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,

	.start_ap = iwl_mvm_start_ap_ibss,
	.stop_ap = iwl_mvm_stop_ap_ibss,
	.join_ibss = iwl_mvm_start_ap_ibss,
	.leave_ibss = iwl_mvm_stop_ap_ibss,

	.tx_last_beacon = iwl_mvm_tx_last_beacon,

	.set_tim = iwl_mvm_set_tim,

	.channel_switch = iwl_mvm_channel_switch,
	.pre_channel_switch = iwl_mvm_pre_channel_switch,
	.post_channel_switch = iwl_mvm_post_channel_switch,

	.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
	.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
	.tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,

	.event_callback = iwl_mvm_mac_event_callback,

	.sync_rx_queues = iwl_mvm_sync_rx_queues,

	CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)

#ifdef CONFIG_PM_SLEEP
	/* look at d3.c */
	.suspend = iwl_mvm_suspend,
	.resume = iwl_mvm_resume,
	.set_wakeup = iwl_mvm_set_wakeup,
	.set_rekey_data = iwl_mvm_set_rekey_data,
#if IS_ENABLED(CONFIG_IPV6)
	.ipv6_addr_change = iwl_mvm_ipv6_addr_change,
#endif
	.set_default_unicast_key = iwl_mvm_set_default_unicast_key,
#endif
	.get_survey = iwl_mvm_mac_get_survey,
	.sta_statistics = iwl_mvm_mac_sta_statistics,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.sta_add_debugfs = iwl_mvm_sta_add_debugfs,
#endif
};
4672