/*
 * Excerpts (lines matching "section") from the iwlwifi mvm NVM code;
 * elided lines are marked with "...".
 */

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-eeprom-parse.h"
#include "iwl-eeprom-read.h"
#include "iwl-nvm-parse.h"
#include "iwl-prph.h"
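
/*
 * Excerpt: iwl_nvm_write_chunk() builds an NVM-access host command whose
 * type field carries the target section and checks the status word in the
 * firmware reply; anything other than READ_NVM_CHUNK_SUCCEED is logged
 * and mapped to -EIO.
 */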
static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
			       /* ... */
		.type = cpu_to_le16(section),
	/* ... */
	nvm_resp = (void *)pkt->data;
	if (le16_to_cpu(nvm_resp->status) != READ_NVM_CHUNK_SUCCEED) {
		/* ... */
			"NVM access write command failed for section %u (status = 0x%x)\n",
			section, le16_to_cpu(nvm_resp->status));
		ret = -EIO;
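
/*
 * Excerpt: iwl_nvm_read_chunk() issues the corresponding read command and
 * unpacks status, length, offset and data from the firmware reply.  A
 * failure reported because the section size is a multiple of 2K only
 * produces a debug message, other failures are mapped to -ENODATA, and a
 * reply carrying an unexpected offset is rejected with -EINVAL.
 */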
static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
			      /* ... */
		.type = cpu_to_le16(section),
	/* ... */
	nvm_resp = (void *)pkt->data;
	ret = le16_to_cpu(nvm_resp->status);
	bytes_read = le16_to_cpu(nvm_resp->length);
	offset_read = le16_to_cpu(nvm_resp->offset);
	resp_data = nvm_resp->data;
	/* ... */
		IWL_DEBUG_EEPROM(mvm->trans->dev,
				 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
				 /* ... */
		IWL_DEBUG_EEPROM(mvm->trans->dev,
				 /* ... */
				 ret, mvm->trans->name);
		ret = -ENODATA;
	/* ... */
	ret = -EINVAL;
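
/*
 * Excerpt: iwl_nvm_write_section() pushes a section to the device chunk
 * by chunk through iwl_nvm_write_chunk(), capping each chunk at the
 * number of bytes remaining past the current offset.
 */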
static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
				 /* ... */
				 length - offset);

		ret = iwl_nvm_write_chunk(mvm, section, offset,

/*
 * Reads an NVM section completely.  For devices whose NVM is really an
 * EEPROM this is section 0; because the EEPROM reading is unlimited in
 * size, the data is read in chunks.
 */
static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
				/* ... */
	/* Set nvm section read length */
	/* ... */
		    mvm->trans->trans_cfg->base_params->eeprom_size) {
			/* ... */
			return -ENOBUFS;
		/* ... */
		ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
		/* ... */
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "Cannot read NVM from section %d offset %d, length %d\n",
					 section, offset, length);
	/* ... */
	iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset);

	IWL_DEBUG_EEPROM(mvm->trans->dev,
			 "NVM section %d read completed\n", section);
	struct iwl_nvm_section *sections = mvm->nvm_sections;
	/* ... */
	if (mvm->trans->cfg->nvm_type == IWL_NVM) {
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
		    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
			/* ... */
		if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP)
			/* ... */
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
		    !mvm->nvm_sections[regulatory_type].data) {
			/* ... */
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
		    !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			/* ... */
		/* PHY_SKU section is mandatory in B0 */
		if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT &&
		    !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
	/* ... */
	hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data;
	/* ... */
	regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ?
	/* ... */
	return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib,
				  /* ... */
				  mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant);
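
/*
 * Excerpt: iwl_mvm_load_nvm_to_nic() iterates over mvm->nvm_sections,
 * skips entries with no data, and writes the remaining sections back to
 * the device.
 */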
/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
	/* ... */
	struct iwl_nvm_section *sections = mvm->nvm_sections;

	IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");

	for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
		if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length)
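
/*
 * Excerpt: iwl_nvm_init() reads every NVM section from the device into a
 * temporary buffer and keeps a copy in mvm->nvm_sections as well as in
 * the per-type blobs (SW, calibration, production, PHY SKU, regulatory,
 * HW).  If an external NVM file is configured it is read on top of that,
 * with one retry using the default C-step file name, and the collected
 * sections are finally parsed into mvm->nvm_data.
 */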
	int ret, section;
	/* ... */
	const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step;

	if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
		return -EINVAL;
	/* ... */
	IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");

	nvm_buffer = kmalloc(mvm->trans->trans_cfg->base_params->eeprom_size,
			     /* ... */
		return -ENOMEM;
	for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
		/* ... */
		ret = iwl_nvm_read_section(mvm, section, nvm_buffer,
					   /* ... */
		if (ret == -ENODATA) {
			/* ... */
			ret = -ENOMEM;
		/* ... */
		iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret);

		mvm->nvm_sections[section].data = temp;
		mvm->nvm_sections[section].length = ret;
		/* ... */
		switch (section) {
		/* ... */
			mvm->nvm_sw_blob.data = temp;
			mvm->nvm_sw_blob.size = ret;
		/* ... */
			mvm->nvm_calib_blob.data = temp;
			mvm->nvm_calib_blob.size = ret;
		/* ... */
			mvm->nvm_prod_blob.data = temp;
			mvm->nvm_prod_blob.size = ret;
		/* ... */
			mvm->nvm_phy_sku_blob.data = temp;
			mvm->nvm_phy_sku_blob.size = ret;
		/* ... */
			mvm->nvm_reg_blob.data = temp;
			mvm->nvm_reg_blob.size = ret;
		/* ... */
			if (section == mvm->cfg->nvm_hw_section_num) {
				mvm->nvm_hw_blob.data = temp;
				mvm->nvm_hw_blob.size = ret;
	/* ... */
	/* Only if PNVM selected in the mod param - load external NVM */
	if (mvm->nvm_file_name) {
		/* ... */
		ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
					    mvm->nvm_sections);
		/* ... */
			mvm->nvm_file_name = nvm_file_C;

			if ((ret == -EFAULT || ret == -ENOENT) &&
			    mvm->nvm_file_name) {
				/* ... */
				ret = iwl_read_external_nvm(mvm->trans,
							    mvm->nvm_file_name,
							    mvm->nvm_sections);
	/* ... */
	mvm->nvm_data = iwl_parse_nvm_sections(mvm);
	if (!mvm->nvm_data)
		return -ENODATA;
	IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
			 mvm->nvm_data->nvm_version);
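
/*
 * Excerpt: iwl_mvm_update_mcc() asks the firmware for an MCC (country
 * code) update and copies the reply.  Depending on a firmware capability
 * bit, the reply is either the current iwl_mcc_update_resp layout or the
 * older iwl_mcc_update_resp_v3, which is converted field by field.  A
 * firmware/NVM quirk that reports MCC 0x00 for the world domain is worked
 * around by substituting "00" (0x3030).
 */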
	/* ... */
		return ERR_PTR(-EOPNOTSUPP);
	/* ... */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			/* ... */
		struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;

		n_channels = __le32_to_cpu(mcc_resp->n_channels);
		/* ... */
			resp_cp = ERR_PTR(-ENOMEM);
		/* ... */
		struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;

		n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
		/* ... */
			resp_cp = ERR_PTR(-ENOMEM);
		/* ... */
		resp_cp->status = mcc_resp_v3->status;
		resp_cp->mcc = mcc_resp_v3->mcc;
		resp_cp->cap = cpu_to_le16(mcc_resp_v3->cap);
		resp_cp->source_id = mcc_resp_v3->source_id;
		resp_cp->time = mcc_resp_v3->time;
		resp_cp->geo_info = mcc_resp_v3->geo_info;
		resp_cp->n_channels = mcc_resp_v3->n_channels;
		memcpy(resp_cp->channels, mcc_resp_v3->channels,
		       /* ... */

	status = le32_to_cpu(resp_cp->status);

	mcc = le16_to_cpu(resp_cp->mcc);

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	/* ... */
		mcc = 0x3030; /* "00" - world */
		resp_cp->mcc = cpu_to_le16(mcc);
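
/*
 * Excerpt: iwl_mvm_init_mcc() compares LAR (location-aware regulatory)
 * support advertised by the firmware TLVs with what the NVM reports, then
 * sets up the initial regulatory domain, preferring an MCC obtained via
 * ACPI when one is available, and applies it with
 * regulatory_set_wiphy_regd_sync().
 */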
	if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
		tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
				      /* ... */
		nvm_lar = mvm->nvm_data->lar_enabled;
	/* ... */
	if (retval != -ENOENT)
		/* ... */
	mvm->lar_regdom_set = false;
	/* ... */
		return -EIO;
	/* ... */
	    !iwl_acpi_get_mcc(mvm->dev, mcc)) {
		/* ... */
		regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
					     /* ... */
			return -EIO;
	/* ... */
	retval = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd);
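
/*
 * Excerpt: iwl_mvm_rx_chub_update_mcc() handles a country-code change
 * notification from the firmware: it decodes the two-character MCC and
 * its source, skips Wi-Fi-sourced updates while a vif is associated, and
 * otherwise builds a new regulatory domain and applies it.
 */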
	struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
	/* ... */
	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) {
		/* ... */
	mcc[0] = le16_to_cpu(notif->mcc) >> 8;
	mcc[1] = le16_to_cpu(notif->mcc) & 0xff;
	/* ... */
	src = notif->source_id;
	/* ... */
	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
	/* ... */
	regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);