Lines Matching +full:retain +full:- +full:state +full:- +full:suspended
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 - 2019 Intel Corporation
27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 - 2019 Intel Corporation
41 * * Redistributions of source code must retain the above copyright
72 #include "iwl-debug.h"
73 #include "iwl-config.h"
75 #include "iwl-op-mode.h"
79 #include "fw/api/dbg-tlv.h"
80 #include "iwl-dbg-tlv.h"
 83  * DOC: Transport layer - what is it?
118 #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
134 * 28-27: Reserved
140 * 21-16: RX queue
141 * 15-14: Reserved
142 * 13-00: RX frame size
151 return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; in iwl_rx_packet_len()
156 return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr); in iwl_rx_packet_payload_len()
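/*
 * Illustrative sketch (not part of iwl-trans.h): an RX handler can use the
 * two helpers above to bounds-check a notification before reading it.  The
 * handle_resp() name and struct my_fw_response type are hypothetical.
 */
static int handle_resp(struct iwl_rx_packet *pkt)
{
	struct my_fw_response *resp;

	/* payload length = frame size from len_n_flags minus the command header */
	if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp))
		return -EINVAL;

	resp = (void *)pkt->data;
	/* ... consume resp ... */
	return 0;
}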
 160  * enum CMD_MODE - how to send the host commands?
192 u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
200 * struct iwl_device_tx_cmd - buffer for TX command
223 #define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)
226 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
252 * struct iwl_host_cmd - Host command to the uCode
278 free_pages(cmd->_rx_page_addr, cmd->_rx_page_order); in iwl_free_resp()
291 return (void *)((unsigned long)page_address(r->_page) + r->_offset); in rxb_addr()
296 return r->_offset; in rxb_offset()
301 r->_page_stolen = true; in rxb_steal_page()
302 get_page(r->_page); in rxb_steal_page()
303 return r->_page; in rxb_steal_page()
308 __free_pages(r->_page, r->_rx_page_order); in iwl_free_rxb()
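/*
 * Illustrative sketch (not part of iwl-trans.h): the usual pattern for a
 * synchronous host command whose response buffer is kept (CMD_WANT_SKB)
 * and must be released with iwl_free_resp().  ECHO_CMD is used only as a
 * placeholder command id; error handling is reduced to the minimum.
 */
static int send_and_check_echo(struct iwl_trans *trans)
{
	struct iwl_host_cmd cmd = {
		.id = ECHO_CMD,		/* placeholder command id */
		.flags = CMD_WANT_SKB,	/* keep the response packet */
	};
	int ret;

	ret = iwl_trans_send_cmd(trans, &cmd);
	if (ret)
		return ret;

	/* cmd.resp_pkt now points at the firmware's response packet */

	iwl_free_resp(&cmd);		/* frees the pages held for the response */
	return 0;
}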
313 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
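/*
 * Worked example (illustrative, not in the header): IWL_MASK(lo, hi)
 * produces a mask with bits lo through hi set inclusive, for instance
 * IWL_MASK(0, 3) == 0x0000000f and IWL_MASK(8, 11) == 0x00000f00, since
 * (1 << hi) | ((1 << hi) - (1 << lo)) sets bit hi and every bit below it
 * down to bit lo.
 */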
329 * enum iwl_wowlan_status - WoWLAN image/device status
331 * @IWL_D3_STATUS_RESET: device was reset while suspended
344 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
345 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
346 * @STATUS_FW_ERROR: the fw is in error state
349 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
350 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
379 return -1; in iwl_trans_get_rb_size_order()
418 * struct iwl_trans_config - transport configuration
430 * @rx_buf_size: RX buffer size needed for A-MSDUs
439 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
477 * struct iwl_trans_rxq_dma_data - RX queue DMA data
491 * struct iwl_trans_ops - transport specific operations
 516  * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
518 * return -ERFKILL straight away.
521 * the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
 535  * @txq_disable: de-configure a Tx queue that was configured to send AMPDUs
560 * @set_pmi: set the power pmi state
561 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 566  * @set_bits_mask: set SRAM register according to value and mask.
632 void (*set_pmi)(struct iwl_trans *trans, bool state);
650 * enum iwl_trans_state - state of the transport layer
663 * In system-wide power management the entire platform goes into a low
664 * power state (e.g. idle or suspend to RAM) at the same time and the
669 * mode is dictated by the wake-on-WLAN configuration.
673 * - D0: the device is fully powered and the host is awake;
674 * - D3: the device is in low power mode and only reacts to
675 * specific events (e.g. magic-packet received or scan
679 * be confused with the physical device power state.
683 * enum iwl_plat_pm_mode - platform power management mode
 686  * behavior when in system-wide suspend (i.e. WoWLAN).
 689  * device. In system-wide suspend mode, it means that all
691 * the platform is suspended.
728 * struct iwl_fw_mon - fw monitor per allocation id
738 * struct iwl_self_init_dram - dram data used by self init process
752 * struct iwl_trans_debug - transport debug related data
763 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
764 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
847 * struct iwl_txq - Tx Queue for DMA
851 * the writeback -- this is DMA memory and an array holding one buffer
854 * @entries: transmit entries (driver state)
860 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 864  * @write_ptr: first empty entry (index), host_w
918 * struct iwl_trans_txqs - transport tx queues data
921 * @page_offs: offset from skb->cb to mac header page pointer
922 * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
 923  * @queue_used: bit mask of used queues
 924  * @queue_stopped: bit mask of stopped queues
954 * struct iwl_trans - transport common data
 956  * @ops: pointer to iwl_trans_ops
 957  * @op_mode: pointer to the op_mode
 958  * @trans_cfg: the trans-specific configuration part
 959  * @cfg: pointer to the configuration
 960  * @drv: pointer to iwl_drv
 961  * @status: a bit-mask of transport status flags
 962  * @dev: pointer to struct device * that represents the device
966 * @hw_id: a u32 with the ID of the device / sub-device.
976 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
983 * @system_pm_mode: the system-wide power management mode in use.
994 enum iwl_trans_state state; member
1049 trans->op_mode = trans_cfg->op_mode; in iwl_trans_configure()
1051 trans->ops->configure(trans, trans_cfg); in iwl_trans_configure()
1059 return trans->ops->start_hw(trans); in iwl_trans_start_hw()
1066 if (trans->ops->op_mode_leave) in iwl_trans_op_mode_leave()
1067 trans->ops->op_mode_leave(trans); in iwl_trans_op_mode_leave()
1069 trans->op_mode = NULL; in iwl_trans_op_mode_leave()
1071 trans->state = IWL_TRANS_NO_FW; in iwl_trans_op_mode_leave()
1078 trans->state = IWL_TRANS_FW_ALIVE; in iwl_trans_fw_alive()
1080 trans->ops->fw_alive(trans, scd_addr); in iwl_trans_fw_alive()
1089 WARN_ON_ONCE(!trans->rx_mpdu_cmd); in iwl_trans_start_fw()
1091 clear_bit(STATUS_FW_ERROR, &trans->status); in iwl_trans_start_fw()
1092 return trans->ops->start_fw(trans, fw, run_in_rfkill); in iwl_trans_start_fw()
1099 trans->ops->stop_device(trans); in iwl_trans_stop_device()
1101 trans->state = IWL_TRANS_NO_FW; in iwl_trans_stop_device()
1108 if (!trans->ops->d3_suspend) in iwl_trans_d3_suspend()
1111 return trans->ops->d3_suspend(trans, test, reset); in iwl_trans_d3_suspend()
1119 if (!trans->ops->d3_resume) in iwl_trans_d3_resume()
1122 return trans->ops->d3_resume(trans, status, test, reset); in iwl_trans_d3_resume()
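/*
 * Illustrative sketch (not part of the header): a WoWLAN-capable op-mode
 * typically records that the platform is going into D3 and then defers to
 * the d3_suspend hook wrapped above.  op_mode_wowlan_suspend() is a
 * hypothetical caller and error handling is omitted.
 */
static int op_mode_wowlan_suspend(struct iwl_trans *trans, bool reset)
{
	trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;

	return iwl_trans_d3_suspend(trans, false /* test */, reset);
}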
1127 if (!trans->ops->suspend) in iwl_trans_suspend()
1130 return trans->ops->suspend(trans); in iwl_trans_suspend()
1135 if (trans->ops->resume) in iwl_trans_resume()
1136 trans->ops->resume(trans); in iwl_trans_resume()
1142 if (!trans->ops->dump_data) in iwl_trans_dump_data()
1144 return trans->ops->dump_data(trans, dump_mask); in iwl_trans_dump_data()
1150 return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC); in iwl_trans_alloc_tx_cmd()
1158 kmem_cache_free(trans->dev_cmd_pool, dev_cmd); in iwl_trans_free_tx_cmd()
1164 if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) in iwl_trans_tx()
1165 return -EIO; in iwl_trans_tx()
1167 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_tx()
1168 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_tx()
1169 return -EIO; in iwl_trans_tx()
1172 return trans->ops->tx(trans, skb, dev_cmd, queue); in iwl_trans_tx()
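/*
 * Illustrative sketch (not part of the header): how an op-mode pairs the
 * Tx-command pool with iwl_trans_tx().  Building the firmware TX command in
 * dev_cmd->payload is firmware-API specific and omitted; the function name
 * is hypothetical.
 */
static int op_mode_tx_one(struct iwl_trans *trans, struct sk_buff *skb,
			  int queue)
{
	struct iwl_device_tx_cmd *dev_cmd;
	int ret;

	dev_cmd = iwl_trans_alloc_tx_cmd(trans);
	if (!dev_cmd)
		return -ENOMEM;

	/* ... build the firmware TX command in dev_cmd->payload ... */

	ret = iwl_trans_tx(trans, skb, dev_cmd, queue);
	if (ret)
		iwl_trans_free_tx_cmd(trans, dev_cmd);

	return ret;
}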
1178 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_reclaim()
1179 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_reclaim()
1183 trans->ops->reclaim(trans, queue, ssn, skbs); in iwl_trans_reclaim()
1189 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_set_q_ptrs()
1190 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_set_q_ptrs()
1194 trans->ops->set_q_ptrs(trans, queue, ptr); in iwl_trans_set_q_ptrs()
1200 trans->ops->txq_disable(trans, queue, configure_scd); in iwl_trans_txq_disable()
1210 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_txq_enable_cfg()
1211 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_txq_enable_cfg()
1215 return trans->ops->txq_enable(trans, queue, ssn, in iwl_trans_txq_enable_cfg()
1223 if (WARN_ON_ONCE(!trans->ops->rxq_dma_data)) in iwl_trans_get_rxq_dma_data()
1224 return -ENOTSUPP; in iwl_trans_get_rxq_dma_data()
1226 return trans->ops->rxq_dma_data(trans, queue, data); in iwl_trans_get_rxq_dma_data()
1232 if (WARN_ON_ONCE(!trans->ops->txq_free)) in iwl_trans_txq_free()
1235 trans->ops->txq_free(trans, queue); in iwl_trans_txq_free()
1246 if (WARN_ON_ONCE(!trans->ops->txq_alloc)) in iwl_trans_txq_alloc()
1247 return -ENOTSUPP; in iwl_trans_txq_alloc()
1249 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_txq_alloc()
1250 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_txq_alloc()
1251 return -EIO; in iwl_trans_txq_alloc()
1254 return trans->ops->txq_alloc(trans, flags, sta_id, tid, in iwl_trans_txq_alloc()
1261 if (trans->ops->txq_set_shared_mode) in iwl_trans_txq_set_shared_mode()
1262 trans->ops->txq_set_shared_mode(trans, queue, shared_mode); in iwl_trans_txq_set_shared_mode()
1287 .sta_id = -1, in iwl_trans_ac_txq_enable()
1300 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_freeze_txq_timer()
1301 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_freeze_txq_timer()
1305 if (trans->ops->freeze_txq_timer) in iwl_trans_freeze_txq_timer()
1306 trans->ops->freeze_txq_timer(trans, txqs, freeze); in iwl_trans_freeze_txq_timer()
1312 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_block_txq_ptrs()
1313 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_block_txq_ptrs()
1317 if (trans->ops->block_txq_ptrs) in iwl_trans_block_txq_ptrs()
1318 trans->ops->block_txq_ptrs(trans, block); in iwl_trans_block_txq_ptrs()
1324 if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty)) in iwl_trans_wait_tx_queues_empty()
1325 return -ENOTSUPP; in iwl_trans_wait_tx_queues_empty()
1327 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_wait_tx_queues_empty()
1328 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_wait_tx_queues_empty()
1329 return -EIO; in iwl_trans_wait_tx_queues_empty()
1332 return trans->ops->wait_tx_queues_empty(trans, txqs); in iwl_trans_wait_tx_queues_empty()
1337 if (WARN_ON_ONCE(!trans->ops->wait_txq_empty)) in iwl_trans_wait_txq_empty()
1338 return -ENOTSUPP; in iwl_trans_wait_txq_empty()
1340 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { in iwl_trans_wait_txq_empty()
1341 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); in iwl_trans_wait_txq_empty()
1342 return -EIO; in iwl_trans_wait_txq_empty()
1345 return trans->ops->wait_txq_empty(trans, queue); in iwl_trans_wait_txq_empty()
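/*
 * Illustrative sketch (not part of the header): draining Tx queues before a
 * reconfiguration, using the two "wait empty" wrappers above.  The queue
 * bitmap and queue id are placeholders.
 */
static void drain_queues(struct iwl_trans *trans, int single_queue)
{
	u32 txq_bm = BIT(0) | BIT(1);	/* placeholder bitmap of data queues */

	if (iwl_trans_wait_tx_queues_empty(trans, txq_bm))
		IWL_ERR(trans, "Tx queues did not drain in time\n");

	if (iwl_trans_wait_txq_empty(trans, single_queue))
		IWL_ERR(trans, "Tx queue %d did not drain in time\n",
			single_queue);
}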
1350 trans->ops->write8(trans, ofs, val); in iwl_trans_write8()
1355 trans->ops->write32(trans, ofs, val); in iwl_trans_write32()
1360 return trans->ops->read32(trans, ofs); in iwl_trans_read32()
1365 return trans->ops->read_prph(trans, ofs); in iwl_trans_read_prph()
1371 return trans->ops->write_prph(trans, ofs, val); in iwl_trans_write_prph()
1377 return trans->ops->read_mem(trans, addr, buf, dwords); in iwl_trans_read_mem()
1400 return trans->ops->write_mem(trans, addr, buf, dwords); in iwl_trans_write_mem()
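/*
 * Illustrative sketch (not part of the header): reading and writing device
 * SRAM through the read_mem/write_mem wrappers, one dword at a time.  The
 * address is a placeholder and errors are simply propagated.
 */
static int bump_sram_dword(struct iwl_trans *trans, u32 addr)
{
	u32 val;
	int ret;

	ret = iwl_trans_read_mem(trans, addr, &val, 1);	/* 1 dword */
	if (ret)
		return ret;

	val++;
	return iwl_trans_write_mem(trans, addr, &val, 1);
}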
1409 static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) in iwl_trans_set_pmi() argument
1411 if (trans->ops->set_pmi) in iwl_trans_set_pmi()
1412 trans->ops->set_pmi(trans, state); in iwl_trans_set_pmi()
1417 if (trans->ops->sw_reset) in iwl_trans_sw_reset()
1418 trans->ops->sw_reset(trans); in iwl_trans_sw_reset()
1424 trans->ops->set_bits_mask(trans, reg, mask, value); in iwl_trans_set_bits_mask()
1429 likely((trans)->ops->grab_nic_access(trans, flags)))
1434 trans->ops->release_nic_access(trans, flags); in __releases()
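/*
 * Illustrative sketch (not part of the header): the canonical pattern for
 * touching non-HBUS registers - wake the NIC with grab_nic_access, do the
 * access, then release.  The register offset is a placeholder.
 */
static u32 read_prph_if_awake(struct iwl_trans *trans, u32 reg)
{
	unsigned long flags;
	u32 val = 0;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		val = iwl_trans_read_prph(trans, reg);
		iwl_trans_release_nic_access(trans, &flags);
	}

	return val;
}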
1440 if (WARN_ON_ONCE(!trans->op_mode)) in iwl_trans_fw_error()
1444 if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) in iwl_trans_fw_error()
1445 iwl_op_mode_nic_error(trans->op_mode); in iwl_trans_fw_error()
1450 return trans->state == IWL_TRANS_FW_ALIVE; in iwl_trans_fw_running()
1455 if (trans->ops->sync_nmi) in iwl_trans_sync_nmi()
1456 trans->ops->sync_nmi(trans); in iwl_trans_sync_nmi()
1462 if (trans->ops->set_pnvm) { in iwl_trans_set_pnvm()
1463 int ret = trans->ops->set_pnvm(trans, data, len); in iwl_trans_set_pnvm()
1469 trans->pnvm_loaded = true; in iwl_trans_set_pnvm()
1476 return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED || in iwl_trans_dbg_ini_valid()
1477 trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED; in iwl_trans_dbg_ini_valid()