1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of version 2 of the GNU General Public License as
15 * published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
25 * USA
26 *
27 * The full GNU General Public License is included in this distribution
28 * in the file called COPYING.
29 *
30 * Contact Information:
31 * Intel Linux Wireless <linuxwifi@intel.com>
32 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
33 *
34 * BSD LICENSE
35 *
36 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
37 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
38 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
39 * Copyright(c) 2018 Intel Corporation
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 *
46 * * Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * * Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in
50 * the documentation and/or other materials provided with the
51 * distribution.
52 * * Neither the name Intel Corporation nor the names of its
53 * contributors may be used to endorse or promote products derived
54 * from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
57 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
58 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
59 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
60 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
62 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
63 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
64 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
65 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
66 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67 *****************************************************************************/
68 #include <linux/types.h>
69 #include <linux/slab.h>
70 #include <linux/export.h>
71 #include <linux/etherdevice.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74
75 #include "iwl-drv.h"
76 #include "iwl-modparams.h"
77 #include "iwl-nvm-parse.h"
78 #include "iwl-prph.h"
79 #include "iwl-io.h"
80 #include "iwl-csr.h"
81 #include "fw/acpi.h"
82 #include "fw/api/nvm-reg.h"
83 #include "fw/api/commands.h"
84 #include "fw/api/cmdhdr.h"
85 #include "fw/img.h"
86
87 /* NVM offsets (in words) definitions */
88 enum nvm_offsets {
89 /* NVM HW-Section offset (in words) definitions */
90 SUBSYSTEM_ID = 0x0A,
91 HW_ADDR = 0x15,
92
93 /* NVM SW-Section offset (in words) definitions */
94 NVM_SW_SECTION = 0x1C0,
95 NVM_VERSION = 0,
96 RADIO_CFG = 1,
97 SKU = 2,
98 N_HW_ADDRS = 3,
99 NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
100
101 /* NVM calibration section offset (in words) definitions */
102 NVM_CALIB_SECTION = 0x2B8,
103 XTAL_CALIB = 0x316 - NVM_CALIB_SECTION,
104
105 /* NVM REGULATORY-Section offset (in words) definitions */
106 NVM_CHANNELS_SDP = 0,
107 };
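/*
 * All of the offsets above index 16-bit NVM words rather than bytes,
 * which is why the accessors below add them directly to __le16/__be16
 * pointers; HW_ADDR at word 0x15, for example, corresponds to byte
 * offset 0x2A within the HW section.
 */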
108
109 enum ext_nvm_offsets {
110 /* NVM HW-Section offset (in words) definitions */
111 MAC_ADDRESS_OVERRIDE_EXT_NVM = 1,
112
113 /* NVM SW-Section offset (in words) definitions */
114 NVM_VERSION_EXT_NVM = 0,
115 RADIO_CFG_FAMILY_EXT_NVM = 0,
116 SKU_FAMILY_8000 = 2,
117 N_HW_ADDRS_FAMILY_8000 = 3,
118
119 /* NVM REGULATORY-Section offset (in words) definitions */
120 NVM_CHANNELS_EXTENDED = 0,
121 NVM_LAR_OFFSET_OLD = 0x4C7,
122 NVM_LAR_OFFSET = 0x507,
123 NVM_LAR_ENABLED = 0x7,
124 };
125
126 /* SKU Capabilities (actual values from NVM definition) */
127 enum nvm_sku_bits {
128 NVM_SKU_CAP_BAND_24GHZ = BIT(0),
129 NVM_SKU_CAP_BAND_52GHZ = BIT(1),
130 NVM_SKU_CAP_11N_ENABLE = BIT(2),
131 NVM_SKU_CAP_11AC_ENABLE = BIT(3),
132 NVM_SKU_CAP_MIMO_DISABLE = BIT(5),
133 };
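/*
 * For example, a SKU word of 0x0f advertises both bands plus 11n and
 * 11ac support, while 0x2f additionally sets NVM_SKU_CAP_MIMO_DISABLE
 * and limits the device to a single spatial stream (see
 * iwl_init_vht_hw_capab() below).
 */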
134
135 /*
136 * These are the channel numbers in the order that they are stored in the NVM
137 */
138 static const u8 iwl_nvm_channels[] = {
139 /* 2.4 GHz */
140 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
141 /* 5 GHz */
142 36, 40, 44, 48, 52, 56, 60, 64,
143 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
144 149, 153, 157, 161, 165
145 };
146
147 static const u8 iwl_ext_nvm_channels[] = {
148 /* 2.4 GHz */
149 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
150 /* 5 GHz */
151 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
152 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
153 149, 153, 157, 161, 165, 169, 173, 177, 181
154 };
155
156 #define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
157 #define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels)
158 #define NUM_2GHZ_CHANNELS 14
159 #define NUM_2GHZ_CHANNELS_EXT 14
160 #define FIRST_2GHZ_HT_MINUS 5
161 #define LAST_2GHZ_HT_PLUS 9
162 #define LAST_5GHZ_HT 165
163 #define LAST_5GHZ_HT_FAMILY_8000 181
164 #define N_HW_ADDR_MASK 0xF
165
166 /* rate data (static) */
167 static struct ieee80211_rate iwl_cfg80211_rates[] = {
168 { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
169 { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
170 .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
171 { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
172 .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
173 { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
174 .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
175 { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
176 { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
177 { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
178 { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
179 { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
180 { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
181 { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
182 { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
183 };
184 #define RATES_24_OFFS 0
185 #define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates)
186 #define RATES_52_OFFS 4
187 #define N_RATES_52 (N_RATES_24 - RATES_52_OFFS)
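/*
 * The 5 GHz band reuses the tail of the same rate array: RATES_52_OFFS
 * skips the four CCK entries (1/2/5.5/11 Mbit/s) that only exist in
 * 2.4 GHz, so N_RATES_52 works out to 8 OFDM rates.  Bitrates are given
 * in units of 100 kbit/s, hence the "* 10" above.
 */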
188
189 /**
190 * enum iwl_nvm_channel_flags - channel flags in NVM
191 * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
192 * @NVM_CHANNEL_IBSS: usable as an IBSS channel
193 * @NVM_CHANNEL_ACTIVE: active scanning allowed
194 * @NVM_CHANNEL_RADAR: radar detection required
195 * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
196 * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
197 * on same channel on 2.4 or same UNII band on 5.2
198 * @NVM_CHANNEL_UNIFORM: uniform spreading required
199 * @NVM_CHANNEL_20MHZ: 20 MHz channel okay
200 * @NVM_CHANNEL_40MHZ: 40 MHz channel okay
201 * @NVM_CHANNEL_80MHZ: 80 MHz channel okay
202 * @NVM_CHANNEL_160MHZ: 160 MHz channel okay
203 * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
204 */
205 enum iwl_nvm_channel_flags {
206 NVM_CHANNEL_VALID = BIT(0),
207 NVM_CHANNEL_IBSS = BIT(1),
208 NVM_CHANNEL_ACTIVE = BIT(3),
209 NVM_CHANNEL_RADAR = BIT(4),
210 NVM_CHANNEL_INDOOR_ONLY = BIT(5),
211 NVM_CHANNEL_GO_CONCURRENT = BIT(6),
212 NVM_CHANNEL_UNIFORM = BIT(7),
213 NVM_CHANNEL_20MHZ = BIT(8),
214 NVM_CHANNEL_40MHZ = BIT(9),
215 NVM_CHANNEL_80MHZ = BIT(10),
216 NVM_CHANNEL_160MHZ = BIT(11),
217 NVM_CHANNEL_DC_HIGH = BIT(12),
218 };
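/*
 * Bit 2 is unused here (the enum jumps from BIT(1) to BIT(3)).  As an
 * example of how these combine, a flags word of 0x0f1b decodes to
 * VALID | IBSS | ACTIVE | RADAR plus all four bandwidth bits, and
 * iwl_nvm_print_channel_flags() below would print exactly that list.
 */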
219
220 static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
221 int chan, u16 flags)
222 {
223 #define CHECK_AND_PRINT_I(x) \
224 ((flags & NVM_CHANNEL_##x) ? " " #x : "")
225
226 if (!(flags & NVM_CHANNEL_VALID)) {
227 IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n",
228 chan, flags);
229 return;
230 }
231
232 /* Note: this can already print up to 101 characters, 110 is the limit! */
233 IWL_DEBUG_DEV(dev, level,
234 "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
235 chan, flags,
236 CHECK_AND_PRINT_I(VALID),
237 CHECK_AND_PRINT_I(IBSS),
238 CHECK_AND_PRINT_I(ACTIVE),
239 CHECK_AND_PRINT_I(RADAR),
240 CHECK_AND_PRINT_I(INDOOR_ONLY),
241 CHECK_AND_PRINT_I(GO_CONCURRENT),
242 CHECK_AND_PRINT_I(UNIFORM),
243 CHECK_AND_PRINT_I(20MHZ),
244 CHECK_AND_PRINT_I(40MHZ),
245 CHECK_AND_PRINT_I(80MHZ),
246 CHECK_AND_PRINT_I(160MHZ),
247 CHECK_AND_PRINT_I(DC_HIGH));
248 #undef CHECK_AND_PRINT_I
249 }
250
251 static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
252 u16 nvm_flags, const struct iwl_cfg *cfg)
253 {
254 u32 flags = IEEE80211_CHAN_NO_HT40;
255 u32 last_5ghz_ht = LAST_5GHZ_HT;
256
257 if (cfg->nvm_type == IWL_NVM_EXT)
258 last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
259
260 if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
261 if (ch_num <= LAST_2GHZ_HT_PLUS)
262 flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
263 if (ch_num >= FIRST_2GHZ_HT_MINUS)
264 flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
265 } else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
266 if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
267 flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
268 else
269 flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
270 }
271 if (!(nvm_flags & NVM_CHANNEL_80MHZ))
272 flags |= IEEE80211_CHAN_NO_80MHZ;
273 if (!(nvm_flags & NVM_CHANNEL_160MHZ))
274 flags |= IEEE80211_CHAN_NO_160MHZ;
275
276 if (!(nvm_flags & NVM_CHANNEL_IBSS))
277 flags |= IEEE80211_CHAN_NO_IR;
278
279 if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
280 flags |= IEEE80211_CHAN_NO_IR;
281
282 if (nvm_flags & NVM_CHANNEL_RADAR)
283 flags |= IEEE80211_CHAN_RADAR;
284
285 if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
286 flags |= IEEE80211_CHAN_INDOOR_ONLY;
287
288 /* Set the GO concurrent flag only in case that NO_IR is set.
289 * Otherwise it is meaningless
290 */
291 if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
292 (flags & IEEE80211_CHAN_NO_IR))
293 flags |= IEEE80211_CHAN_IR_CONCURRENT;
294
295 return flags;
296 }
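/*
 * The HT40 pairing above relies on the channel ordering in
 * iwl_nvm_channels/iwl_ext_nvm_channels: in 5 GHz the channels are
 * stored in HT40 pairs (36/40, 44/48, ...), so an even index relative
 * to the start of the 5 GHz block is the lower channel of a pair and
 * gets HT40+, while the odd one gets HT40-.  In 2.4 GHz the limits are
 * simply channel <= 9 for HT40+ and channel >= 5 for HT40-.
 */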
297
298 static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
299 struct iwl_nvm_data *data,
300 const __le16 * const nvm_ch_flags,
301 u32 sbands_flags)
302 {
303 int ch_idx;
304 int n_channels = 0;
305 struct ieee80211_channel *channel;
306 u16 ch_flags;
307 int num_of_ch, num_2ghz_channels;
308 const u8 *nvm_chan;
309
310 if (cfg->nvm_type != IWL_NVM_EXT) {
311 num_of_ch = IWL_NVM_NUM_CHANNELS;
312 nvm_chan = &iwl_nvm_channels[0];
313 num_2ghz_channels = NUM_2GHZ_CHANNELS;
314 } else {
315 num_of_ch = IWL_NVM_NUM_CHANNELS_EXT;
316 nvm_chan = &iwl_ext_nvm_channels[0];
317 num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
318 }
319
320 for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
321 bool is_5ghz = (ch_idx >= num_2ghz_channels);
322
323 ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
324
325 if (is_5ghz && !data->sku_cap_band_52ghz_enable)
326 continue;
327
328 /* workaround to disable wide channels in 5GHz */
329 if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) &&
330 is_5ghz) {
331 ch_flags &= ~(NVM_CHANNEL_40MHZ |
332 NVM_CHANNEL_80MHZ |
333 NVM_CHANNEL_160MHZ);
334 }
335
336 if (ch_flags & NVM_CHANNEL_160MHZ)
337 data->vht160_supported = true;
338
339 if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR) &&
340 !(ch_flags & NVM_CHANNEL_VALID)) {
341 /*
342 * Channels might become valid later if LAR is
343 * supported; in that case they are still added to
344 * cfg80211, while without LAR an invalid channel is skipped.
345 */
346 iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
347 nvm_chan[ch_idx], ch_flags);
348 continue;
349 }
350
351 channel = &data->channels[n_channels];
352 n_channels++;
353
354 channel->hw_value = nvm_chan[ch_idx];
355 channel->band = is_5ghz ?
356 NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
357 channel->center_freq =
358 ieee80211_channel_to_frequency(
359 channel->hw_value, channel->band);
360
361 /* Initialize regulatory-based run-time data */
362
363 /*
364 * Default value - highest tx power value. max_power
365 * is not used in mvm and is only kept for backwards compatibility.
366 */
367 channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
368
369 /* don't put limitations in case we're using LAR */
370 if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR))
371 channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
372 ch_idx, is_5ghz,
373 ch_flags, cfg);
374 else
375 channel->flags = 0;
376
377 iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
378 channel->hw_value, ch_flags);
379 IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
380 channel->hw_value, channel->max_power);
381 }
382
383 return n_channels;
384 }
385
386 static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
387 struct iwl_nvm_data *data,
388 struct ieee80211_sta_vht_cap *vht_cap,
389 u8 tx_chains, u8 rx_chains)
390 {
391 int num_rx_ants = num_of_ant(rx_chains);
392 int num_tx_ants = num_of_ant(tx_chains);
393 unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?:
394 IEEE80211_VHT_MAX_AMPDU_1024K);
395
396 vht_cap->vht_supported = true;
397
398 vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
399 IEEE80211_VHT_CAP_RXSTBC_1 |
400 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
401 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
402 max_ampdu_exponent <<
403 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
404
405 if (data->vht160_supported)
406 vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
407 IEEE80211_VHT_CAP_SHORT_GI_160;
408
409 if (cfg->vht_mu_mimo_supported)
410 vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
411
412 if (cfg->ht_params->ldpc)
413 vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
414
415 if (data->sku_cap_mimo_disabled) {
416 num_rx_ants = 1;
417 num_tx_ants = 1;
418 }
419
420 if (num_tx_ants > 1)
421 vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
422 else
423 vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
424
425 switch (iwlwifi_mod_params.amsdu_size) {
426 case IWL_AMSDU_DEF:
427 if (cfg->mq_rx_supported)
428 vht_cap->cap |=
429 IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
430 else
431 vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
432 break;
433 case IWL_AMSDU_2K:
434 if (cfg->mq_rx_supported)
435 vht_cap->cap |=
436 IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
437 else
438 WARN(1, "RB size of 2K is not supported by this device\n");
439 break;
440 case IWL_AMSDU_4K:
441 vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
442 break;
443 case IWL_AMSDU_8K:
444 vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
445 break;
446 case IWL_AMSDU_12K:
447 vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
448 break;
449 default:
450 break;
451 }
452
453 vht_cap->vht_mcs.rx_mcs_map =
454 cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
455 IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
456 IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
457 IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
458 IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
459 IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
460 IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
461 IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
462
463 if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
464 vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
465 /* this works because NOT_SUPPORTED == 3 */
466 vht_cap->vht_mcs.rx_mcs_map |=
467 cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
468 }
469
470 vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
471 }
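/*
 * The rx_mcs_map built above packs two bits per spatial stream (NSS 1
 * in bits 0-1 up to NSS 8 in bits 14-15), yielding 0xfff5: MCS 0-9 for
 * two streams, the rest marked not supported.  ORing in NOT_SUPPORTED
 * for the second stream in the SISO case only works because the value
 * 3 sets both bits of that field, as the comment above notes.
 */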
472
473 static struct ieee80211_sband_iftype_data iwl_he_capa = {
474 .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
475 .he_cap = {
476 .has_he = true,
477 .he_cap_elem = {
478 .mac_cap_info[0] =
479 IEEE80211_HE_MAC_CAP0_HTC_HE,
480 .mac_cap_info[1] =
481 IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
482 IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
483 .mac_cap_info[2] =
484 IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
485 IEEE80211_HE_MAC_CAP2_ACK_EN,
486 .mac_cap_info[3] =
487 IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
488 IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
489 .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
490 .phy_cap_info[0] =
491 IEEE80211_HE_PHY_CAP0_DUAL_BAND |
492 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
493 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
494 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
495 .phy_cap_info[1] =
496 IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
497 IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
498 IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
499 .phy_cap_info[2] =
500 IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
501 IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
502 IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
503 .phy_cap_info[3] =
504 IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
505 IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
506 IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
507 IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
508 .phy_cap_info[4] =
509 IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
510 IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
511 IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
512 .phy_cap_info[5] =
513 IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
514 IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
515 .phy_cap_info[6] =
516 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
517 .phy_cap_info[7] =
518 IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
519 IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
520 IEEE80211_HE_PHY_CAP7_MAX_NC_7,
521 .phy_cap_info[8] =
522 IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
523 IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
524 IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
525 IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU,
526 },
527 /*
528 * Set default Tx/Rx HE MCS NSS Support field. Indicate support
529 * for up to 2 spatial streams and all MCS, without any special
530 * cases
531 */
532 .he_mcs_nss_supp = {
533 .rx_mcs_80 = cpu_to_le16(0xfffa),
534 .tx_mcs_80 = cpu_to_le16(0xfffa),
535 .rx_mcs_160 = cpu_to_le16(0xfffa),
536 .tx_mcs_160 = cpu_to_le16(0xfffa),
537 .rx_mcs_80p80 = cpu_to_le16(0xffff),
538 .tx_mcs_80p80 = cpu_to_le16(0xffff),
539 },
540 /*
541 * Set default PPE thresholds, with PPET16 set to 0, PPET8 set
542 * to 7
543 */
544 .ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
545 },
546 };
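/*
 * The HE MCS maps use the same two-bits-per-NSS layout as VHT: 0xfffa
 * is MCS 0-11 for NSS 1 and 2 with NSS 3-8 not supported, and 0xffff
 * for 80+80 means no streams are supported in that configuration.  The
 * ppe_thres bytes encode the PPET16 = 0 / PPET8 = 7 pairs mentioned in
 * the comment above.
 */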
547
548 static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
549 u8 tx_chains, u8 rx_chains)
550 {
551 if (sband->band == NL80211_BAND_2GHZ ||
552 sband->band == NL80211_BAND_5GHZ)
553 sband->iftype_data = &iwl_he_capa;
554 else
555 return;
556
557 sband->n_iftype_data = 1;
558
559 /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
560 if ((tx_chains & rx_chains) != ANT_AB) {
561 iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &=
562 ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS;
563 iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &=
564 ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS;
565 }
566 }
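/*
 * Note that iwl_he_capa is a single static structure shared by both
 * bands, so clearing the midamble RX max NSTS bits here (which appear
 * to form one field split across phy_cap_info[1] and [2]) downgrades
 * the advertised value for every sband that points at it.
 */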
567
568 static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
569 struct iwl_nvm_data *data,
570 const __le16 *nvm_ch_flags, u8 tx_chains,
571 u8 rx_chains, u32 sbands_flags)
572 {
573 int n_channels;
574 int n_used = 0;
575 struct ieee80211_supported_band *sband;
576
577 n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
578 sbands_flags);
579 sband = &data->bands[NL80211_BAND_2GHZ];
580 sband->band = NL80211_BAND_2GHZ;
581 sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
582 sband->n_bitrates = N_RATES_24;
583 n_used += iwl_init_sband_channels(data, sband, n_channels,
584 NL80211_BAND_2GHZ);
585 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
586 tx_chains, rx_chains);
587
588 if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
589 iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
590
591 sband = &data->bands[NL80211_BAND_5GHZ];
592 sband->band = NL80211_BAND_5GHZ;
593 sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
594 sband->n_bitrates = N_RATES_52;
595 n_used += iwl_init_sband_channels(data, sband, n_channels,
596 NL80211_BAND_5GHZ);
597 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
598 tx_chains, rx_chains);
599 if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
600 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
601 tx_chains, rx_chains);
602
603 if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
604 iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
605
606 if (n_channels != n_used)
607 IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
608 n_used, n_channels);
609 }
610
611 static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
612 const __le16 *phy_sku)
613 {
614 if (cfg->nvm_type != IWL_NVM_EXT)
615 return le16_to_cpup(nvm_sw + SKU);
616
617 return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
618 }
619
620 static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
621 {
622 if (cfg->nvm_type != IWL_NVM_EXT)
623 return le16_to_cpup(nvm_sw + NVM_VERSION);
624 else
625 return le32_to_cpup((__le32 *)(nvm_sw +
626 NVM_VERSION_EXT_NVM));
627 }
628
629 static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
630 const __le16 *phy_sku)
631 {
632 if (cfg->nvm_type != IWL_NVM_EXT)
633 return le16_to_cpup(nvm_sw + RADIO_CFG);
634
635 return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
636
637 }
638
639 static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
640 {
641 int n_hw_addr;
642
643 if (cfg->nvm_type != IWL_NVM_EXT)
644 return le16_to_cpup(nvm_sw + N_HW_ADDRS);
645
646 n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
647
648 return n_hw_addr & N_HW_ADDR_MASK;
649 }
650
651 static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
652 struct iwl_nvm_data *data,
653 u32 radio_cfg)
654 {
655 if (cfg->nvm_type != IWL_NVM_EXT) {
656 data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
657 data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
658 data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
659 data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
660 return;
661 }
662
663 /* set the radio configuration for family 8000 */
664 data->radio_cfg_type = EXT_NVM_RF_CFG_TYPE_MSK(radio_cfg);
665 data->radio_cfg_step = EXT_NVM_RF_CFG_STEP_MSK(radio_cfg);
666 data->radio_cfg_dash = EXT_NVM_RF_CFG_DASH_MSK(radio_cfg);
667 data->radio_cfg_pnum = EXT_NVM_RF_CFG_FLAVOR_MSK(radio_cfg);
668 data->valid_tx_ant = EXT_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
669 data->valid_rx_ant = EXT_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
670 }
671
672 static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
673 {
674 const u8 *hw_addr;
675
676 hw_addr = (const u8 *)&mac_addr0;
677 dest[0] = hw_addr[3];
678 dest[1] = hw_addr[2];
679 dest[2] = hw_addr[1];
680 dest[3] = hw_addr[0];
681
682 hw_addr = (const u8 *)&mac_addr1;
683 dest[4] = hw_addr[1];
684 dest[5] = hw_addr[0];
685 }
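/*
 * For example, register values of mac_addr0 = 0x00112233 and
 * mac_addr1 = 0x00004455 (as read back and converted to __le32 above)
 * come out of this byte flip as the address 00:11:22:33:44:55.
 */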
686
687 static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
688 struct iwl_nvm_data *data)
689 {
690 __le32 mac_addr0 =
691 cpu_to_le32(iwl_read32(trans,
692 trans->cfg->csr->mac_addr0_strap));
693 __le32 mac_addr1 =
694 cpu_to_le32(iwl_read32(trans,
695 trans->cfg->csr->mac_addr1_strap));
696
697 iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
698 /*
699 * If the OEM fused a valid address, use it instead of the one in the
700 * OTP
701 */
702 if (is_valid_ether_addr(data->hw_addr))
703 return;
704
705 mac_addr0 = cpu_to_le32(iwl_read32(trans,
706 trans->cfg->csr->mac_addr0_otp));
707 mac_addr1 = cpu_to_le32(iwl_read32(trans,
708 trans->cfg->csr->mac_addr1_otp));
709
710 iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
711 }
712
713 static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
714 const struct iwl_cfg *cfg,
715 struct iwl_nvm_data *data,
716 const __le16 *mac_override,
717 const __be16 *nvm_hw)
718 {
719 const u8 *hw_addr;
720
721 if (mac_override) {
722 static const u8 reserved_mac[] = {
723 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
724 };
725
726 hw_addr = (const u8 *)(mac_override +
727 MAC_ADDRESS_OVERRIDE_EXT_NVM);
728
729 /*
730 * Store the MAC address from MAO section.
731 * No byte swapping is required in MAO section
732 */
733 memcpy(data->hw_addr, hw_addr, ETH_ALEN);
734
735 /*
736 * Force the use of the OTP MAC address in case of reserved MAC
737 * address in the NVM, or if address is given but invalid.
738 */
739 if (is_valid_ether_addr(data->hw_addr) &&
740 memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
741 return;
742
743 IWL_ERR(trans,
744 "mac address from nvm override section is not valid\n");
745 }
746
747 if (nvm_hw) {
748 /* read the mac address from WFMP registers */
749 __le32 mac_addr0 = cpu_to_le32(iwl_trans_read_prph(trans,
750 WFMP_MAC_ADDR_0));
751 __le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans,
752 WFMP_MAC_ADDR_1));
753
754 iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
755
756 return;
757 }
758
759 IWL_ERR(trans, "mac address is not found\n");
760 }
761
762 static int iwl_set_hw_address(struct iwl_trans *trans,
763 const struct iwl_cfg *cfg,
764 struct iwl_nvm_data *data, const __be16 *nvm_hw,
765 const __le16 *mac_override)
766 {
767 if (cfg->mac_addr_from_csr) {
768 iwl_set_hw_address_from_csr(trans, data);
769 } else if (cfg->nvm_type != IWL_NVM_EXT) {
770 const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
771
772 /* The byte order is little endian 16 bit, meaning 214365 */
773 data->hw_addr[0] = hw_addr[1];
774 data->hw_addr[1] = hw_addr[0];
775 data->hw_addr[2] = hw_addr[3];
776 data->hw_addr[3] = hw_addr[2];
777 data->hw_addr[4] = hw_addr[5];
778 data->hw_addr[5] = hw_addr[4];
779 } else {
780 iwl_set_hw_address_family_8000(trans, cfg, data,
781 mac_override, nvm_hw);
782 }
783
784 if (!is_valid_ether_addr(data->hw_addr)) {
785 IWL_ERR(trans, "no valid mac address was found\n");
786 return -EINVAL;
787 }
788
789 IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);
790
791 return 0;
792 }
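/*
 * To illustrate the "214365" ordering above: an NVM HW section that
 * stores the bytes 11 00 33 22 55 44 at HW_ADDR decodes to the address
 * 00:11:22:33:44:55 after the pairwise swap.
 */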
793
794 static bool
795 iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg,
796 const __be16 *nvm_hw)
797 {
798 /*
799 * Workaround a bug in Indonesia SKUs where the regulatory in
800 * some 7000-family OTPs erroneously allow wide channels in
801 * 5GHz. To check for Indonesia, we take the SKU value from
802 * bits 1-4 in the subsystem ID and check if it is either 5 or
803 * 9. In those cases, we need to force-disable wide channels
804 * in 5GHz otherwise the FW will throw a sysassert when we try
805 * to use them.
806 */
807 if (cfg->device_family == IWL_DEVICE_FAMILY_7000) {
808 /*
809 * Unlike the other sections in the NVM, the hw
810 * section uses big-endian.
811 */
812 u16 subsystem_id = be16_to_cpup(nvm_hw + SUBSYSTEM_ID);
813 u8 sku = (subsystem_id & 0x1e) >> 1;
814
815 if (sku == 5 || sku == 9) {
816 IWL_DEBUG_EEPROM(dev,
817 "disabling wide channels in 5GHz (0x%0x %d)\n",
818 subsystem_id, sku);
819 return true;
820 }
821 }
822
823 return false;
824 }
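/*
 * The (subsystem_id & 0x1e) >> 1 above extracts bits 1-4 of the
 * subsystem ID; a (hypothetical) ID of 0x0012 would therefore yield
 * SKU 9 and trigger the workaround.
 */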
825
826 struct iwl_nvm_data *
827 iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
828 const __be16 *nvm_hw, const __le16 *nvm_sw,
829 const __le16 *nvm_calib, const __le16 *regulatory,
830 const __le16 *mac_override, const __le16 *phy_sku,
831 u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
832 {
833 struct device *dev = trans->dev;
834 struct iwl_nvm_data *data;
835 bool lar_enabled;
836 u32 sku, radio_cfg;
837 u32 sbands_flags = 0;
838 u16 lar_config;
839 const __le16 *ch_section;
840
841 if (cfg->nvm_type != IWL_NVM_EXT)
842 data = kzalloc(sizeof(*data) +
843 sizeof(struct ieee80211_channel) *
844 IWL_NVM_NUM_CHANNELS,
845 GFP_KERNEL);
846 else
847 data = kzalloc(sizeof(*data) +
848 sizeof(struct ieee80211_channel) *
849 IWL_NVM_NUM_CHANNELS_EXT,
850 GFP_KERNEL);
851 if (!data)
852 return NULL;
853
854 data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);
855
856 radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku);
857 iwl_set_radio_cfg(cfg, data, radio_cfg);
858 if (data->valid_tx_ant)
859 tx_chains &= data->valid_tx_ant;
860 if (data->valid_rx_ant)
861 rx_chains &= data->valid_rx_ant;
862
863 sku = iwl_get_sku(cfg, nvm_sw, phy_sku);
864 data->sku_cap_band_24ghz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
865 data->sku_cap_band_52ghz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
866 data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
867 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
868 data->sku_cap_11n_enable = false;
869 data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
870 (sku & NVM_SKU_CAP_11AC_ENABLE);
871 data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
872
873 data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
874
875 if (cfg->nvm_type != IWL_NVM_EXT) {
876 /* Checking for required sections */
877 if (!nvm_calib) {
878 IWL_ERR(trans,
879 "Can't parse empty Calib NVM sections\n");
880 kfree(data);
881 return NULL;
882 }
883
884 ch_section = cfg->nvm_type == IWL_NVM_SDP ?
885 &regulatory[NVM_CHANNELS_SDP] :
886 &nvm_sw[NVM_CHANNELS];
887
888 /* in family 8000 Xtal calibration values moved to OTP */
889 data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
890 data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
891 lar_enabled = true;
892 } else {
893 u16 lar_offset = data->nvm_version < 0xE39 ?
894 NVM_LAR_OFFSET_OLD :
895 NVM_LAR_OFFSET;
896
897 lar_config = le16_to_cpup(regulatory + lar_offset);
898 data->lar_enabled = !!(lar_config &
899 NVM_LAR_ENABLED);
900 lar_enabled = data->lar_enabled;
901 ch_section = &regulatory[NVM_CHANNELS_EXTENDED];
902 }
903
904 /* If no valid mac address was found - bail out */
905 if (iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override)) {
906 kfree(data);
907 return NULL;
908 }
909
910 if (lar_fw_supported && lar_enabled)
911 sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
912
913 if (iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw))
914 sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
915
916 iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
917 sbands_flags);
918 data->calib_version = 255;
919
920 return data;
921 }
922 IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
923
924 static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
925 int ch_idx, u16 nvm_flags,
926 const struct iwl_cfg *cfg)
927 {
928 u32 flags = NL80211_RRF_NO_HT40;
929 u32 last_5ghz_ht = LAST_5GHZ_HT;
930
931 if (cfg->nvm_type == IWL_NVM_EXT)
932 last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
933
934 if (ch_idx < NUM_2GHZ_CHANNELS &&
935 (nvm_flags & NVM_CHANNEL_40MHZ)) {
936 if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
937 flags &= ~NL80211_RRF_NO_HT40PLUS;
938 if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
939 flags &= ~NL80211_RRF_NO_HT40MINUS;
940 } else if (nvm_chan[ch_idx] <= last_5ghz_ht &&
941 (nvm_flags & NVM_CHANNEL_40MHZ)) {
942 if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
943 flags &= ~NL80211_RRF_NO_HT40PLUS;
944 else
945 flags &= ~NL80211_RRF_NO_HT40MINUS;
946 }
947
948 if (!(nvm_flags & NVM_CHANNEL_80MHZ))
949 flags |= NL80211_RRF_NO_80MHZ;
950 if (!(nvm_flags & NVM_CHANNEL_160MHZ))
951 flags |= NL80211_RRF_NO_160MHZ;
952
953 if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
954 flags |= NL80211_RRF_NO_IR;
955
956 if (nvm_flags & NVM_CHANNEL_RADAR)
957 flags |= NL80211_RRF_DFS;
958
959 if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
960 flags |= NL80211_RRF_NO_OUTDOOR;
961
962 /* Set the GO concurrent flag only in case that NO_IR is set.
963 * Otherwise it is meaningless
964 */
965 if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
966 (flags & NL80211_RRF_NO_IR))
967 flags |= NL80211_RRF_GO_CONCURRENT;
968
969 return flags;
970 }
971
972 struct regdb_ptrs {
973 struct ieee80211_wmm_rule *rule;
974 u32 token;
975 };
976
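/*
 * iwl_parse_nvm_mcc_info() below builds the regdomain by walking the
 * firmware channel list in order: consecutive valid channels that share
 * the same regulatory flags and are no more than 20 MHz apart are
 * folded into a single reg rule whose end frequency keeps growing,
 * NL80211_RRF_AUTO_BW then lets cfg80211 work out the usable bandwidth,
 * and the over-allocated regdomain is finally trimmed down to the rules
 * actually used.
 */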
977 struct ieee80211_regdomain *
978 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
979 int num_of_ch, __le32 *channels, u16 fw_mcc,
980 u16 geo_info)
981 {
982 int ch_idx;
983 u16 ch_flags;
984 u32 reg_rule_flags, prev_reg_rule_flags = 0;
985 const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
986 iwl_ext_nvm_channels : iwl_nvm_channels;
987 struct ieee80211_regdomain *regd, *copy_rd;
988 int size_of_regd, regd_to_copy;
989 struct ieee80211_reg_rule *rule;
990 struct regdb_ptrs *regdb_ptrs;
991 enum nl80211_band band;
992 int center_freq, prev_center_freq = 0;
993 int valid_rules = 0;
994 bool new_rule;
995 int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
996 IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
997
998 if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
999 return ERR_PTR(-EINVAL);
1000
1001 if (WARN_ON(num_of_ch > max_num_ch))
1002 num_of_ch = max_num_ch;
1003
1004 IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n",
1005 num_of_ch);
1006
1007 /* build a regdomain rule for every valid channel */
1008 size_of_regd =
1009 sizeof(struct ieee80211_regdomain) +
1010 num_of_ch * sizeof(struct ieee80211_reg_rule);
1011
1012 regd = kzalloc(size_of_regd, GFP_KERNEL);
1013 if (!regd)
1014 return ERR_PTR(-ENOMEM);
1015
1016 regdb_ptrs = kcalloc(num_of_ch, sizeof(*regdb_ptrs), GFP_KERNEL);
1017 if (!regdb_ptrs) {
1018 copy_rd = ERR_PTR(-ENOMEM);
1019 goto out;
1020 }
1021
1022 /* set alpha2 from FW. */
1023 regd->alpha2[0] = fw_mcc >> 8;
1024 regd->alpha2[1] = fw_mcc & 0xff;
1025
1026 for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
1027 ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
1028 band = (ch_idx < NUM_2GHZ_CHANNELS) ?
1029 NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
1030 center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
1031 band);
1032 new_rule = false;
1033
1034 if (!(ch_flags & NVM_CHANNEL_VALID)) {
1035 iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
1036 nvm_chan[ch_idx], ch_flags);
1037 continue;
1038 }
1039
1040 reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
1041 ch_flags, cfg);
1042
1043 /* we can't continue the same rule */
1044 if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
1045 center_freq - prev_center_freq > 20) {
1046 valid_rules++;
1047 new_rule = true;
1048 }
1049
1050 rule = &regd->reg_rules[valid_rules - 1];
1051
1052 if (new_rule)
1053 rule->freq_range.start_freq_khz =
1054 MHZ_TO_KHZ(center_freq - 10);
1055
1056 rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10);
1057
1058 /* this doesn't matter - not used by FW */
1059 rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
1060 rule->power_rule.max_eirp =
1061 DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
1062
1063 rule->flags = reg_rule_flags;
1064
1065 /* rely on auto-calculation to merge BW of contiguous chans */
1066 rule->flags |= NL80211_RRF_AUTO_BW;
1067 rule->freq_range.max_bandwidth_khz = 0;
1068
1069 prev_center_freq = center_freq;
1070 prev_reg_rule_flags = reg_rule_flags;
1071
1072 iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
1073 nvm_chan[ch_idx], ch_flags);
1074
1075 if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) ||
1076 band == NL80211_BAND_2GHZ)
1077 continue;
1078
1079 reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
1080 }
1081
1082 regd->n_reg_rules = valid_rules;
1083
1084 /*
1085 * Narrow the regdomain down to the rules actually used, so that there
1086 * is no hole between the reg rules and the wmm rules.
1087 */
1088 regd_to_copy = sizeof(struct ieee80211_regdomain) +
1089 valid_rules * sizeof(struct ieee80211_reg_rule);
1090
1091 copy_rd = kzalloc(regd_to_copy, GFP_KERNEL);
1092 if (!copy_rd) {
1093 copy_rd = ERR_PTR(-ENOMEM);
1094 goto out;
1095 }
1096
1097 memcpy(copy_rd, regd, regd_to_copy);
1098
1099 out:
1100 kfree(regdb_ptrs);
1101 kfree(regd);
1102 return copy_rd;
1103 }
1104 IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
1105
1106 #define IWL_MAX_NVM_SECTION_SIZE 0x1b58
1107 #define IWL_MAX_EXT_NVM_SECTION_SIZE 0x1ffc
1108 #define MAX_NVM_FILE_LEN 16384
1109
1110 void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
1111 unsigned int len)
1112 {
1113 #define IWL_4165_DEVICE_ID 0x5501
1114 #define NVM_SKU_CAP_MIMO_DISABLE BIT(5)
1115
1116 if (section == NVM_SECTION_TYPE_PHY_SKU &&
1117 hw_id == IWL_4165_DEVICE_ID && data && len >= 5 &&
1118 (data[4] & NVM_SKU_CAP_MIMO_DISABLE))
1119 /* OTP 0x52 bug work around: it's a 1x1 device */
1120 data[3] = ANT_B | (ANT_B << 4);
1121 }
1122 IWL_EXPORT_SYMBOL(iwl_nvm_fixups);
1123
1124 /*
1125 * Reads external NVM from a file into mvm->nvm_sections
1126 *
1127 * HOW TO CREATE THE NVM FILE FORMAT:
1128 * ------------------------------
1129 * 1. create hex file, format:
1130 * 3800 -> header
1131 * 0000 -> header
1132 * 5a40 -> data
1133 *
1134 * rev - 6 bit (word1)
1135 * len - 10 bit (word1)
1136 * id - 4 bit (word2)
1137 * rsv - 12 bit (word2)
1138 *
1139 * 2. flip 8bits with 8 bits per line to get the right NVM file format
1140 *
1141 * 3. create binary file from the hex file
1142 *
1143 * 4. save as "iNVM_xxx.bin" under /lib/firmware
1144 */
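/*
 * Roughly, after the optional header each section in the file looks
 * like
 *
 *	struct {
 *		__le16 word1;
 *		__le16 word2;
 *		u8 data[];
 *	};
 *
 * and iwl_read_external_nvm() below decodes it per NVM type as
 *
 *	old layout:  size = 2 * NVM_WORD1_LEN(word1),     id = NVM_WORD2_ID(word2)
 *	extended:    size = 2 * EXT_NVM_WORD2_LEN(word2), id = EXT_NVM_WORD1_ID(word1)
 *
 * with a pair of all-zero words marking the end of the file.
 */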
1145 int iwl_read_external_nvm(struct iwl_trans *trans,
1146 const char *nvm_file_name,
1147 struct iwl_nvm_section *nvm_sections)
1148 {
1149 int ret, section_size;
1150 u16 section_id;
1151 const struct firmware *fw_entry;
1152 const struct {
1153 __le16 word1;
1154 __le16 word2;
1155 u8 data[];
1156 } *file_sec;
1157 const u8 *eof;
1158 u8 *temp;
1159 int max_section_size;
1160 const __le32 *dword_buff;
1161
1162 #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
1163 #define NVM_WORD2_ID(x) (x >> 12)
1164 #define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8))
1165 #define EXT_NVM_WORD1_ID(x) ((x) >> 4)
1166 #define NVM_HEADER_0 (0x2A504C54)
1167 #define NVM_HEADER_1 (0x4E564D2A)
1168 #define NVM_HEADER_SIZE (4 * sizeof(u32))
1169
1170 IWL_DEBUG_EEPROM(trans->dev, "Read from external NVM\n");
1171
1172 /* Maximal size depends on NVM version */
1173 if (trans->cfg->nvm_type != IWL_NVM_EXT)
1174 max_section_size = IWL_MAX_NVM_SECTION_SIZE;
1175 else
1176 max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
1177
1178 /*
1179 * Obtain NVM image via request_firmware. Since we already used
1180 * request_firmware_nowait() for the firmware binary load and only
1181 * get here after that we assume the NVM request can be satisfied
1182 * synchronously.
1183 */
1184 ret = request_firmware(&fw_entry, nvm_file_name, trans->dev);
1185 if (ret) {
1186 IWL_ERR(trans, "ERROR: %s isn't available %d\n",
1187 nvm_file_name, ret);
1188 return ret;
1189 }
1190
1191 IWL_INFO(trans, "Loaded NVM file %s (%zu bytes)\n",
1192 nvm_file_name, fw_entry->size);
1193
1194 if (fw_entry->size > MAX_NVM_FILE_LEN) {
1195 IWL_ERR(trans, "NVM file too large\n");
1196 ret = -EINVAL;
1197 goto out;
1198 }
1199
1200 eof = fw_entry->data + fw_entry->size;
1201 dword_buff = (__le32 *)fw_entry->data;
1202
1203 /* Some NVM files contain a header.
1204 * The header is identified by two dwords, as follows:
1205 * dword[0] = 0x2A504C54
1206 * dword[1] = 0x4E564D2A
1207 *
1208 * This header must be skipped when providing the NVM data to the FW.
1209 */
1210 if (fw_entry->size > NVM_HEADER_SIZE &&
1211 dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
1212 dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
1213 file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
1214 IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
1215 IWL_INFO(trans, "NVM Manufacturing date %08X\n",
1216 le32_to_cpu(dword_buff[3]));
1217
1218 /* nvm file validation, dword_buff[2] holds the file version */
1219 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1220 CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP &&
1221 le32_to_cpu(dword_buff[2]) < 0xE4A) {
1222 ret = -EFAULT;
1223 goto out;
1224 }
1225 } else {
1226 file_sec = (void *)fw_entry->data;
1227 }
1228
1229 while (true) {
1230 if (file_sec->data > eof) {
1231 IWL_ERR(trans,
1232 "ERROR - NVM file too short for section header\n");
1233 ret = -EINVAL;
1234 break;
1235 }
1236
1237 /* check for EOF marker */
1238 if (!file_sec->word1 && !file_sec->word2) {
1239 ret = 0;
1240 break;
1241 }
1242
1243 if (trans->cfg->nvm_type != IWL_NVM_EXT) {
1244 section_size =
1245 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
1246 section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
1247 } else {
1248 section_size = 2 * EXT_NVM_WORD2_LEN(
1249 le16_to_cpu(file_sec->word2));
1250 section_id = EXT_NVM_WORD1_ID(
1251 le16_to_cpu(file_sec->word1));
1252 }
1253
1254 if (section_size > max_section_size) {
1255 IWL_ERR(trans, "ERROR - section too large (%d)\n",
1256 section_size);
1257 ret = -EINVAL;
1258 break;
1259 }
1260
1261 if (!section_size) {
1262 IWL_ERR(trans, "ERROR - section empty\n");
1263 ret = -EINVAL;
1264 break;
1265 }
1266
1267 if (file_sec->data + section_size > eof) {
1268 IWL_ERR(trans,
1269 "ERROR - NVM file too short for section (%d bytes)\n",
1270 section_size);
1271 ret = -EINVAL;
1272 break;
1273 }
1274
1275 if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
1276 "Invalid NVM section ID %d\n", section_id)) {
1277 ret = -EINVAL;
1278 break;
1279 }
1280
1281 temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
1282 if (!temp) {
1283 ret = -ENOMEM;
1284 break;
1285 }
1286
1287 iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size);
1288
1289 kfree(nvm_sections[section_id].data);
1290 nvm_sections[section_id].data = temp;
1291 nvm_sections[section_id].length = section_size;
1292
1293 /* advance to the next section */
1294 file_sec = (void *)(file_sec->data + section_size);
1295 }
1296 out:
1297 release_firmware(fw_entry);
1298 return ret;
1299 }
1300 IWL_EXPORT_SYMBOL(iwl_read_external_nvm);
1301
1302 struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
1303 const struct iwl_fw *fw)
1304 {
1305 struct iwl_nvm_get_info cmd = {};
1306 struct iwl_nvm_get_info_rsp *rsp;
1307 struct iwl_nvm_data *nvm;
1308 struct iwl_host_cmd hcmd = {
1309 .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
1310 .data = { &cmd, },
1311 .len = { sizeof(cmd) },
1312 .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
1313 };
1314 int ret;
1315 bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
1316 fw_has_capa(&fw->ucode_capa,
1317 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
1318 u32 mac_flags;
1319 u32 sbands_flags = 0;
1320
1321 ret = iwl_trans_send_cmd(trans, &hcmd);
1322 if (ret)
1323 return ERR_PTR(ret);
1324
1325 if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
1326 "Invalid payload len in NVM response from FW %d",
1327 iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
1328 ret = -EINVAL;
1329 goto out;
1330 }
1331
1332 rsp = (void *)hcmd.resp_pkt->data;
1333 if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
1334 IWL_INFO(trans, "OTP is empty\n");
1335
1336 nvm = kzalloc(sizeof(*nvm) +
1337 sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
1338 GFP_KERNEL);
1339 if (!nvm) {
1340 ret = -ENOMEM;
1341 goto out;
1342 }
1343
1344 iwl_set_hw_address_from_csr(trans, nvm);
1345 /* TODO: if platform NVM has MAC address - override it here */
1346
1347 if (!is_valid_ether_addr(nvm->hw_addr)) {
1348 IWL_ERR(trans, "no valid mac address was found\n");
1349 ret = -EINVAL;
1350 goto err_free;
1351 }
1352
1353 IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr);
1354
1355 /* Initialize general data */
1356 nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
1357
1358 /* Initialize MAC sku data */
1359 mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);
1360 nvm->sku_cap_11ac_enable =
1361 !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
1362 nvm->sku_cap_11n_enable =
1363 !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
1364 nvm->sku_cap_11ax_enable =
1365 !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
1366 nvm->sku_cap_band_24ghz_enable =
1367 !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
1368 nvm->sku_cap_band_52ghz_enable =
1369 !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
1370 nvm->sku_cap_mimo_disabled =
1371 !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
1372
1373 /* Initialize PHY sku data */
1374 nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
1375 nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
1376
1377 if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
1378 nvm->lar_enabled = true;
1379 sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
1380 }
1381
1382 iwl_init_sbands(trans->dev, trans->cfg, nvm,
1383 rsp->regulatory.channel_profile,
1384 nvm->valid_tx_ant & fw->valid_tx_ant,
1385 nvm->valid_rx_ant & fw->valid_rx_ant,
1386 sbands_flags);
1387
1388 iwl_free_resp(&hcmd);
1389 return nvm;
1390
1391 err_free:
1392 kfree(nvm);
1393 out:
1394 iwl_free_resp(&hcmd);
1395 return ERR_PTR(ret);
1396 }
1397 IWL_EXPORT_SYMBOL(iwl_get_nvm);
1398