Lines Matching refs:mvm
77 int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) in iwl_mvm_send_cmd() argument
82 if (WARN_ON(mvm->d3_test_active)) in iwl_mvm_send_cmd()
92 lockdep_assert_held(&mvm->mutex); in iwl_mvm_send_cmd()
94 ret = iwl_trans_send_cmd(mvm->trans, cmd); in iwl_mvm_send_cmd()
110 int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, in iwl_mvm_send_cmd_pdu() argument
120 return iwl_mvm_send_cmd(mvm, &cmd); in iwl_mvm_send_cmd_pdu()
126 int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, in iwl_mvm_send_cmd_status() argument
133 lockdep_assert_held(&mvm->mutex); in iwl_mvm_send_cmd_status()
136 if (WARN_ON(mvm->d3_test_active)) in iwl_mvm_send_cmd_status()
150 ret = iwl_trans_send_cmd(mvm->trans, cmd); in iwl_mvm_send_cmd_status()
179 int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, in iwl_mvm_send_cmd_pdu_status() argument
188 return iwl_mvm_send_cmd_status(mvm, &cmd, status); in iwl_mvm_send_cmd_pdu_status()
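
The four helpers above (lines 77-188) form the host-command path. A minimal sketch, reconstructed from the fragments shown, of how the fixed-length PDU variant forwards to iwl_mvm_send_cmd(); the single-fragment initializer of struct iwl_host_cmd is an assumption and not copied from the source:

        /* Sketch only (not the verbatim source): how the PDU helper appears
         * to forward to iwl_mvm_send_cmd() with a single data fragment. */
        static int sketch_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, u32 flags,
                                       u16 len, const void *data)
        {
                struct iwl_host_cmd cmd = {
                        .id = id,
                        .len = { len, },        /* one fragment (assumed layout) */
                        .data = { data, },
                        .flags = flags,
                };

                /* iwl_mvm_send_cmd() (lines 77-94) warns while d3_test_active
                 * is set, asserts mvm->mutex, and hands the command to
                 * iwl_trans_send_cmd(). */
                return iwl_mvm_send_cmd(mvm, &cmd);
        }
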
247 void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) in iwl_mvm_rx_fw_error() argument
252 IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n", in iwl_mvm_rx_fw_error()
254 IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n", in iwl_mvm_rx_fw_error()
257 IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n", in iwl_mvm_rx_fw_error()
279 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) in iwl_mvm_next_antenna() argument
462 static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) in iwl_mvm_dump_umac_error_log() argument
464 struct iwl_trans *trans = mvm->trans; in iwl_mvm_dump_umac_error_log()
466 u32 base = mvm->trans->dbg.umac_error_event_table; in iwl_mvm_dump_umac_error_log()
468 if (!mvm->support_umac_log && in iwl_mvm_dump_umac_error_log()
469 !(mvm->trans->dbg.error_event_table_tlv_status & in iwl_mvm_dump_umac_error_log()
476 mvm->fwrt.dump.umac_err_id = table.error_id; in iwl_mvm_dump_umac_error_log()
481 mvm->status, table.valid); in iwl_mvm_dump_umac_error_log()
484 IWL_ERR(mvm, "0x%08X | %s\n", table.error_id, in iwl_mvm_dump_umac_error_log()
486 IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1); in iwl_mvm_dump_umac_error_log()
487 IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2); in iwl_mvm_dump_umac_error_log()
488 IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1); in iwl_mvm_dump_umac_error_log()
489 IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2); in iwl_mvm_dump_umac_error_log()
490 IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1); in iwl_mvm_dump_umac_error_log()
491 IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2); in iwl_mvm_dump_umac_error_log()
492 IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3); in iwl_mvm_dump_umac_error_log()
493 IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major); in iwl_mvm_dump_umac_error_log()
494 IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor); in iwl_mvm_dump_umac_error_log()
495 IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer); in iwl_mvm_dump_umac_error_log()
496 IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer); in iwl_mvm_dump_umac_error_log()
497 IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header); in iwl_mvm_dump_umac_error_log()
498 IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); in iwl_mvm_dump_umac_error_log()
501 static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num) in iwl_mvm_dump_lmac_error_log() argument
503 struct iwl_trans *trans = mvm->trans; in iwl_mvm_dump_lmac_error_log()
505 u32 val, base = mvm->trans->dbg.lmac_error_event_table[lmac_num]; in iwl_mvm_dump_lmac_error_log()
507 if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) { in iwl_mvm_dump_lmac_error_log()
509 base = mvm->fw->init_errlog_ptr; in iwl_mvm_dump_lmac_error_log()
512 base = mvm->fw->inst_errlog_ptr; in iwl_mvm_dump_lmac_error_log()
516 IWL_ERR(mvm, in iwl_mvm_dump_lmac_error_log()
519 (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) in iwl_mvm_dump_lmac_error_log()
542 mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id; in iwl_mvm_dump_lmac_error_log()
547 mvm->status, table.valid); in iwl_mvm_dump_lmac_error_log()
552 IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); in iwl_mvm_dump_lmac_error_log()
554 IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, in iwl_mvm_dump_lmac_error_log()
556 IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0); in iwl_mvm_dump_lmac_error_log()
557 IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1); in iwl_mvm_dump_lmac_error_log()
558 IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); in iwl_mvm_dump_lmac_error_log()
559 IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); in iwl_mvm_dump_lmac_error_log()
560 IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); in iwl_mvm_dump_lmac_error_log()
561 IWL_ERR(mvm, "0x%08X | data1\n", table.data1); in iwl_mvm_dump_lmac_error_log()
562 IWL_ERR(mvm, "0x%08X | data2\n", table.data2); in iwl_mvm_dump_lmac_error_log()
563 IWL_ERR(mvm, "0x%08X | data3\n", table.data3); in iwl_mvm_dump_lmac_error_log()
564 IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time); in iwl_mvm_dump_lmac_error_log()
565 IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low); in iwl_mvm_dump_lmac_error_log()
566 IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); in iwl_mvm_dump_lmac_error_log()
567 IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); in iwl_mvm_dump_lmac_error_log()
568 IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); in iwl_mvm_dump_lmac_error_log()
569 IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type); in iwl_mvm_dump_lmac_error_log()
570 IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major); in iwl_mvm_dump_lmac_error_log()
571 IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor); in iwl_mvm_dump_lmac_error_log()
572 IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); in iwl_mvm_dump_lmac_error_log()
573 IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver); in iwl_mvm_dump_lmac_error_log()
574 IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd); in iwl_mvm_dump_lmac_error_log()
575 IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0); in iwl_mvm_dump_lmac_error_log()
576 IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1); in iwl_mvm_dump_lmac_error_log()
577 IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); in iwl_mvm_dump_lmac_error_log()
578 IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3); in iwl_mvm_dump_lmac_error_log()
579 IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); in iwl_mvm_dump_lmac_error_log()
580 IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id); in iwl_mvm_dump_lmac_error_log()
581 IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); in iwl_mvm_dump_lmac_error_log()
582 IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); in iwl_mvm_dump_lmac_error_log()
583 IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration); in iwl_mvm_dump_lmac_error_log()
584 IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); in iwl_mvm_dump_lmac_error_log()
585 IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); in iwl_mvm_dump_lmac_error_log()
586 IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); in iwl_mvm_dump_lmac_error_log()
587 IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); in iwl_mvm_dump_lmac_error_log()
588 IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); in iwl_mvm_dump_lmac_error_log()
591 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) in iwl_mvm_dump_nic_error_log() argument
593 if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) { in iwl_mvm_dump_nic_error_log()
594 IWL_ERR(mvm, in iwl_mvm_dump_nic_error_log()
599 iwl_mvm_dump_lmac_error_log(mvm, 0); in iwl_mvm_dump_nic_error_log()
601 if (mvm->trans->dbg.lmac_error_event_table[1]) in iwl_mvm_dump_nic_error_log()
602 iwl_mvm_dump_lmac_error_log(mvm, 1); in iwl_mvm_dump_nic_error_log()
604 iwl_mvm_dump_umac_error_log(mvm); in iwl_mvm_dump_nic_error_log()
606 iwl_fw_error_print_fseq_regs(&mvm->fwrt); in iwl_mvm_dump_nic_error_log()
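
iwl_mvm_dump_nic_error_log() (lines 591-606) fans the dump out to the per-processor error tables. A sketch of that dispatch order, pieced together from the lines above; the error message text is illustrative, not the driver's actual string:

        /* Sketch of the dispatch order at lines 593-606; not the verbatim source. */
        void sketch_dump_nic_error_log(struct iwl_mvm *mvm)
        {
                /* Nothing can be read once the device is disabled (line 593). */
                if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
                        IWL_ERR(mvm, "device not enabled, skipping error log dump\n");
                        return;
                }

                iwl_mvm_dump_lmac_error_log(mvm, 0);            /* LMAC 0, always */

                if (mvm->trans->dbg.lmac_error_event_table[1])  /* LMAC 1, if present */
                        iwl_mvm_dump_lmac_error_log(mvm, 1);

                iwl_mvm_dump_umac_error_log(mvm);               /* UMAC table */

                iwl_fw_error_print_fseq_regs(&mvm->fwrt);       /* FSEQ registers */
        }
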
609 int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, in iwl_mvm_reconfig_scd() argument
625 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_reconfig_scd()
628 if (WARN(mvm->queue_info[queue].tid_bitmap == 0, in iwl_mvm_reconfig_scd()
632 IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue); in iwl_mvm_reconfig_scd()
634 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); in iwl_mvm_reconfig_scd()
650 int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq) in iwl_mvm_send_lq_cmd() argument
660 iwl_mvm_has_tlc_offload(mvm))) in iwl_mvm_send_lq_cmd()
663 return iwl_mvm_send_cmd(mvm, &cmd); in iwl_mvm_send_lq_cmd()
674 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, in iwl_mvm_update_smps() argument
682 lockdep_assert_held(&mvm->mutex); in iwl_mvm_update_smps()
685 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) in iwl_mvm_update_smps()
707 int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) in iwl_mvm_request_statistics() argument
720 ret = iwl_mvm_send_cmd(mvm, &cmd); in iwl_mvm_request_statistics()
724 iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt); in iwl_mvm_request_statistics()
728 iwl_mvm_accu_radio_stats(mvm); in iwl_mvm_request_statistics()
733 void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm) in iwl_mvm_accu_radio_stats() argument
735 mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time; in iwl_mvm_accu_radio_stats()
736 mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time; in iwl_mvm_accu_radio_stats()
737 mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf; in iwl_mvm_accu_radio_stats()
738 mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan; in iwl_mvm_accu_radio_stats()
755 bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) in iwl_mvm_rx_diversity_allowed() argument
759 lockdep_assert_held(&mvm->mutex); in iwl_mvm_rx_diversity_allowed()
761 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) in iwl_mvm_rx_diversity_allowed()
764 if (mvm->cfg->rx_with_siso_diversity) in iwl_mvm_rx_diversity_allowed()
768 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, in iwl_mvm_rx_diversity_allowed()
774 void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, in iwl_mvm_send_low_latency_cmd() argument
781 if (!fw_has_capa(&mvm->fw->ucode_capa, in iwl_mvm_send_low_latency_cmd()
791 if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD, in iwl_mvm_send_low_latency_cmd()
794 IWL_ERR(mvm, "Failed to send low latency command\n"); in iwl_mvm_send_low_latency_cmd()
797 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, in iwl_mvm_update_low_latency() argument
805 lockdep_assert_held(&mvm->mutex); in iwl_mvm_update_low_latency()
815 iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id); in iwl_mvm_update_low_latency()
817 res = iwl_mvm_update_quotas(mvm, false, NULL); in iwl_mvm_update_low_latency()
821 iwl_mvm_bt_coex_vif_change(mvm); in iwl_mvm_update_low_latency()
823 return iwl_mvm_power_update_mac(mvm); in iwl_mvm_update_low_latency()
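
iwl_mvm_update_low_latency() (lines 797-823) applies a latency change in a fixed order. A sketch of that sequence, based only on the calls visible above; bookkeeping that does not appear in the listing is omitted:

        /* Sketch of the update sequence at lines 805-823. */
        static int sketch_update_low_latency(struct iwl_mvm *mvm,
                                             struct ieee80211_vif *vif,
                                             bool low_latency)
        {
                struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
                int res;

                lockdep_assert_held(&mvm->mutex);

                /* 1. Tell the firmware about the new latency state (line 815). */
                iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);

                /* 2. Rebalance the time quotas between interfaces (line 817). */
                res = iwl_mvm_update_quotas(mvm, false, NULL);
                if (res)
                        return res;

                /* 3. Re-evaluate BT coexistence for the changed vif (line 821). */
                iwl_mvm_bt_coex_vif_change(mvm);

                /* 4. Recompute the power-save configuration (line 823). */
                return iwl_mvm_power_update_mac(mvm);
        }
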
848 bool iwl_mvm_low_latency(struct iwl_mvm *mvm) in iwl_mvm_low_latency() argument
853 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, in iwl_mvm_low_latency()
859 bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band) in iwl_mvm_low_latency_band() argument
864 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, in iwl_mvm_low_latency_band()
891 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm) in iwl_mvm_get_bss_vif() argument
896 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, in iwl_mvm_get_bss_vif()
900 IWL_ERR(mvm, "More than one managed interface active!\n"); in iwl_mvm_get_bss_vif()
923 bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm) in iwl_mvm_is_vif_assoc() argument
929 ieee80211_iterate_active_interfaces_atomic(mvm->hw, in iwl_mvm_is_vif_assoc()
936 unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, in iwl_mvm_get_wd_timeout() argument
944 mvm->trans->trans_cfg->base_params->wd_timeout; in iwl_mvm_get_wd_timeout()
946 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) { in iwl_mvm_get_wd_timeout()
951 if (fw_has_capa(&mvm->fw->ucode_capa, in iwl_mvm_get_wd_timeout()
959 trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); in iwl_mvm_get_wd_timeout()
988 return mvm->trans->trans_cfg->base_params->wd_timeout; in iwl_mvm_get_wd_timeout()
992 void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, in iwl_mvm_connection_loss() argument
998 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), in iwl_mvm_connection_loss()
1009 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg); in iwl_mvm_connection_loss()
1015 void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, in iwl_mvm_event_frame_timeout_callback() argument
1023 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), in iwl_mvm_event_frame_timeout_callback()
1033 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, in iwl_mvm_event_frame_timeout_callback()
1047 iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed) in iwl_mvm_tcm_load() argument
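
iwl_mvm_tcm_load() (line 1047) classifies how busy the air was over the elapsed window; IWL_MVM_TRAFFIC_LOW at line 1407 is one of the resulting levels. A sketch of that idea; the enum name, the MEDIUM/HIGH enumerators, the thresholds, and the unit handling are assumptions, since the function body is not part of the listing:

        /* Sketch of the airtime-percentage classification implied by line 1047.
         * Thresholds are placeholders; the driver uses named constants that do
         * not appear in this listing. */
        static enum iwl_mvm_traffic_load
        sketch_tcm_load(u32 airtime, unsigned long elapsed)
        {
                unsigned long pct;

                if (!elapsed)
                        return IWL_MVM_TRAFFIC_LOW;

                /* airtime and elapsed assumed here to be in the same time unit */
                pct = 100 * airtime / elapsed;

                if (pct > 50)           /* placeholder threshold */
                        return IWL_MVM_TRAFFIC_HIGH;
                if (pct > 10)           /* placeholder threshold */
                        return IWL_MVM_TRAFFIC_MEDIUM;
                return IWL_MVM_TRAFFIC_LOW;
        }
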
1060 struct iwl_mvm *mvm; member
1067 struct iwl_mvm *mvm = data->mvm; in iwl_mvm_tcm_iter() local
1074 low_latency = mvm->tcm.result.low_latency[mvmvif->id]; in iwl_mvm_tcm_iter()
1076 if (!mvm->tcm.result.change[mvmvif->id] && in iwl_mvm_tcm_iter()
1078 iwl_mvm_update_quotas(mvm, false, NULL); in iwl_mvm_tcm_iter()
1084 iwl_mvm_update_low_latency(mvm, vif, low_latency, in iwl_mvm_tcm_iter()
1087 iwl_mvm_update_quotas(mvm, false, NULL); in iwl_mvm_tcm_iter()
1093 static void iwl_mvm_tcm_results(struct iwl_mvm *mvm) in iwl_mvm_tcm_results() argument
1096 .mvm = mvm, in iwl_mvm_tcm_results()
1100 mutex_lock(&mvm->mutex); in iwl_mvm_tcm_results()
1103 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, in iwl_mvm_tcm_results()
1106 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) in iwl_mvm_tcm_results()
1107 iwl_mvm_config_scan(mvm); in iwl_mvm_tcm_results()
1109 mutex_unlock(&mvm->mutex); in iwl_mvm_tcm_results()
1114 struct iwl_mvm *mvm; in iwl_mvm_tcm_uapsd_nonagg_detected_wk() local
1121 mvm = mvmvif->mvm; in iwl_mvm_tcm_uapsd_nonagg_detected_wk()
1123 if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions) in iwl_mvm_tcm_uapsd_nonagg_detected_wk()
1127 memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr, in iwl_mvm_tcm_uapsd_nonagg_detected_wk()
1129 mvm->uapsd_noagg_bssid_write_idx++; in iwl_mvm_tcm_uapsd_nonagg_detected_wk()
1130 if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN) in iwl_mvm_tcm_uapsd_nonagg_detected_wk()
1131 mvm->uapsd_noagg_bssid_write_idx = 0; in iwl_mvm_tcm_uapsd_nonagg_detected_wk()
1133 iwl_mvm_connection_loss(mvm, vif, in iwl_mvm_tcm_uapsd_nonagg_detected_wk()
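
iwl_mvm_tcm_uapsd_nonagg_detected_wk() (lines 1114-1133) records the offending BSSID in a small ring buffer before forcing a reconnect. A sketch of the wrap-around write at lines 1127-1131; the source of the copied address is not visible above and is passed in as a stand-in parameter:

        /* Sketch of the ring-buffer write at lines 1127-1131. */
        static void sketch_remember_noagg_bssid(struct iwl_mvm *mvm, const u8 *bssid)
        {
                /* 'bssid' stands in for the address the worker copies, which is
                 * not visible in the listing. */
                memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
                       bssid, ETH_ALEN);

                /* Advance the write index and wrap at the list length. */
                mvm->uapsd_noagg_bssid_write_idx++;
                if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
                        mvm->uapsd_noagg_bssid_write_idx = 0;
        }
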
1137 static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm, in iwl_mvm_uapsd_agg_disconnect() argument
1154 if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected) in iwl_mvm_uapsd_agg_disconnect()
1157 mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true; in iwl_mvm_uapsd_agg_disconnect()
1158 IWL_INFO(mvm, in iwl_mvm_uapsd_agg_disconnect()
1163 static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm, in iwl_mvm_check_uapsd_agg_expected_tpt() argument
1167 u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes; in iwl_mvm_check_uapsd_agg_expected_tpt()
1172 rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate); in iwl_mvm_check_uapsd_agg_expected_tpt()
1174 if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions || in iwl_mvm_check_uapsd_agg_expected_tpt()
1175 mvm->tcm.data[mac].uapsd_nonagg_detect.detected) in iwl_mvm_check_uapsd_agg_expected_tpt()
1178 if (iwl_mvm_has_new_rx_api(mvm)) { in iwl_mvm_check_uapsd_agg_expected_tpt()
1200 vif = rcu_dereference(mvm->vif_id_to_mac[mac]); in iwl_mvm_check_uapsd_agg_expected_tpt()
1202 iwl_mvm_uapsd_agg_disconnect(mvm, vif); in iwl_mvm_check_uapsd_agg_expected_tpt()
1218 static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm, in iwl_mvm_calc_tcm_stats() argument
1222 unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts); in iwl_mvm_calc_tcm_stats()
1224 jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts); in iwl_mvm_calc_tcm_stats()
1231 bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD); in iwl_mvm_calc_tcm_stats()
1234 mvm->tcm.ll_ts = ts; in iwl_mvm_calc_tcm_stats()
1236 mvm->tcm.uapsd_nonagg_ts = ts; in iwl_mvm_calc_tcm_stats()
1238 mvm->tcm.result.elapsed = elapsed; in iwl_mvm_calc_tcm_stats()
1240 ieee80211_iterate_active_interfaces_atomic(mvm->hw, in iwl_mvm_calc_tcm_stats()
1246 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; in iwl_mvm_calc_tcm_stats()
1253 load = iwl_mvm_tcm_load(mvm, airtime, elapsed); in iwl_mvm_calc_tcm_stats()
1254 mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac]; in iwl_mvm_calc_tcm_stats()
1255 mvm->tcm.result.load[mac] = load; in iwl_mvm_calc_tcm_stats()
1256 mvm->tcm.result.airtime[mac] = airtime; in iwl_mvm_calc_tcm_stats()
1264 mvm->tcm.result.low_latency[mac] = true; in iwl_mvm_calc_tcm_stats()
1266 mvm->tcm.result.low_latency[mac] = false; in iwl_mvm_calc_tcm_stats()
1273 low_latency |= mvm->tcm.result.low_latency[mac]; in iwl_mvm_calc_tcm_stats()
1275 if (!mvm->tcm.result.low_latency[mac] && handle_uapsd) in iwl_mvm_calc_tcm_stats()
1276 iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed, in iwl_mvm_calc_tcm_stats()
1285 load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed); in iwl_mvm_calc_tcm_stats()
1286 mvm->tcm.result.global_change = load != mvm->tcm.result.global_load; in iwl_mvm_calc_tcm_stats()
1287 mvm->tcm.result.global_load = load; in iwl_mvm_calc_tcm_stats()
1290 band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed); in iwl_mvm_calc_tcm_stats()
1291 mvm->tcm.result.band_load[i] = band_load; in iwl_mvm_calc_tcm_stats()
1323 void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm) in iwl_mvm_recalc_tcm() argument
1327 time_after(ts, mvm->tcm.uapsd_nonagg_ts + in iwl_mvm_recalc_tcm()
1330 spin_lock(&mvm->tcm.lock); in iwl_mvm_recalc_tcm()
1331 if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) { in iwl_mvm_recalc_tcm()
1332 spin_unlock(&mvm->tcm.lock); in iwl_mvm_recalc_tcm()
1335 spin_unlock(&mvm->tcm.lock); in iwl_mvm_recalc_tcm()
1337 if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) { in iwl_mvm_recalc_tcm()
1338 mutex_lock(&mvm->mutex); in iwl_mvm_recalc_tcm()
1339 if (iwl_mvm_request_statistics(mvm, true)) in iwl_mvm_recalc_tcm()
1341 mutex_unlock(&mvm->mutex); in iwl_mvm_recalc_tcm()
1344 spin_lock(&mvm->tcm.lock); in iwl_mvm_recalc_tcm()
1346 if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) { in iwl_mvm_recalc_tcm()
1348 unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts, in iwl_mvm_recalc_tcm()
1353 mvm->tcm.ts = ts; in iwl_mvm_recalc_tcm()
1355 schedule_delayed_work(&mvm->tcm.work, work_delay); in iwl_mvm_recalc_tcm()
1357 spin_unlock(&mvm->tcm.lock); in iwl_mvm_recalc_tcm()
1359 iwl_mvm_tcm_results(mvm); in iwl_mvm_recalc_tcm()
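
iwl_mvm_recalc_tcm() (lines 1323-1359) rechecks the traffic-load state under its spinlock, optionally refreshes firmware statistics, and reschedules its own worker. A sketch of that control flow, with a placeholder for the non-aggregation window constant that is truncated at line 1327:

        /* Sketch of the flow at lines 1323-1359; details not shown in the
         * listing are simplified or omitted. */
        void sketch_recalc_tcm(struct iwl_mvm *mvm)
        {
                unsigned long ts = jiffies;
                bool handle_uapsd =
                        time_after(ts, mvm->tcm.uapsd_nonagg_ts +
                                       NONAGG_CHECK_PERIOD /* placeholder */);

                /* Early-out while paused or before the TCM period elapses. */
                spin_lock(&mvm->tcm.lock);
                if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
                        spin_unlock(&mvm->tcm.lock);
                        return;
                }
                spin_unlock(&mvm->tcm.lock);

                /* Refresh firmware statistics when the RX API supports it. */
                if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
                        mutex_lock(&mvm->mutex);
                        if (iwl_mvm_request_statistics(mvm, true))
                                handle_uapsd = false;
                        mutex_unlock(&mvm->mutex);
                }

                /* Recompute per-MAC and per-band loads, then reschedule. */
                spin_lock(&mvm->tcm.lock);
                if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
                        unsigned long work_delay =
                                iwl_mvm_calc_tcm_stats(mvm, ts, handle_uapsd);

                        mvm->tcm.ts = ts;
                        schedule_delayed_work(&mvm->tcm.work, work_delay);
                }
                spin_unlock(&mvm->tcm.lock);

                /* Apply the results outside the spinlock (line 1359). */
                iwl_mvm_tcm_results(mvm);
        }
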
1365 struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm, in iwl_mvm_tcm_work() local
1368 iwl_mvm_recalc_tcm(mvm); in iwl_mvm_tcm_work()
1371 void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel) in iwl_mvm_pause_tcm() argument
1373 spin_lock_bh(&mvm->tcm.lock); in iwl_mvm_pause_tcm()
1374 mvm->tcm.paused = true; in iwl_mvm_pause_tcm()
1375 spin_unlock_bh(&mvm->tcm.lock); in iwl_mvm_pause_tcm()
1377 cancel_delayed_work_sync(&mvm->tcm.work); in iwl_mvm_pause_tcm()
1380 void iwl_mvm_resume_tcm(struct iwl_mvm *mvm) in iwl_mvm_resume_tcm() argument
1385 spin_lock_bh(&mvm->tcm.lock); in iwl_mvm_resume_tcm()
1386 mvm->tcm.ts = jiffies; in iwl_mvm_resume_tcm()
1387 mvm->tcm.ll_ts = jiffies; in iwl_mvm_resume_tcm()
1389 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; in iwl_mvm_resume_tcm()
1396 if (mvm->tcm.result.low_latency[mac]) in iwl_mvm_resume_tcm()
1401 mvm->tcm.paused = false; in iwl_mvm_resume_tcm()
1407 if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW) in iwl_mvm_resume_tcm()
1408 schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD); in iwl_mvm_resume_tcm()
1410 schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD); in iwl_mvm_resume_tcm()
1412 spin_unlock_bh(&mvm->tcm.lock); in iwl_mvm_resume_tcm()
1415 void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) in iwl_mvm_tcm_add_vif() argument
1423 void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) in iwl_mvm_tcm_rm_vif() argument
1430 u32 iwl_mvm_get_systime(struct iwl_mvm *mvm) in iwl_mvm_get_systime() argument
1434 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 && in iwl_mvm_get_systime()
1435 mvm->trans->cfg->gp2_reg_addr) in iwl_mvm_get_systime()
1436 reg_addr = mvm->trans->cfg->gp2_reg_addr; in iwl_mvm_get_systime()
1438 return iwl_read_prph(mvm->trans, reg_addr); in iwl_mvm_get_systime()
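
iwl_mvm_get_systime() (lines 1430-1438) reads GP2 either from a per-device register or from a default PRPH address. A sketch; the default register name is an assumption, since only the override path appears above:

        /* Sketch of lines 1430-1438; the default register is assumed. */
        u32 sketch_get_systime(struct iwl_mvm *mvm)
        {
                u32 reg_addr = DEVICE_SYSTEM_TIME_REG;  /* assumed default */

                /* Newer families expose GP2 at a per-device address
                 * (lines 1434-1436). */
                if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
                    mvm->trans->cfg->gp2_reg_addr)
                        reg_addr = mvm->trans->cfg->gp2_reg_addr;

                return iwl_read_prph(mvm->trans, reg_addr);
        }
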
1441 void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime) in iwl_mvm_get_sync_time() argument
1445 lockdep_assert_held(&mvm->mutex); in iwl_mvm_get_sync_time()
1448 ps_disabled = mvm->ps_disabled; in iwl_mvm_get_sync_time()
1450 mvm->ps_disabled = true; in iwl_mvm_get_sync_time()
1451 iwl_mvm_power_update_device(mvm); in iwl_mvm_get_sync_time()
1454 *gp2 = iwl_mvm_get_systime(mvm); in iwl_mvm_get_sync_time()
1458 mvm->ps_disabled = ps_disabled; in iwl_mvm_get_sync_time()
1459 iwl_mvm_power_update_device(mvm); in iwl_mvm_get_sync_time()
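
iwl_mvm_get_sync_time() (lines 1441-1459) briefly forces power save off so GP2 and the host boot-time clock can be sampled back to back. A sketch of that sequence; the boot-time call is an assumption, as it does not appear in the listing:

        /* Sketch of lines 1441-1459; the boottime sampling call is assumed. */
        void sketch_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
        {
                bool ps_disabled;

                lockdep_assert_held(&mvm->mutex);

                /* Disable power save so GP2 keeps running while we sample it. */
                ps_disabled = mvm->ps_disabled;
                if (!ps_disabled) {
                        mvm->ps_disabled = true;
                        iwl_mvm_power_update_device(mvm);
                }

                *gp2 = iwl_mvm_get_systime(mvm);
                *boottime = ktime_get_boottime_ns();    /* assumed host-side clock */

                /* Restore the previous power-save setting. */
                if (!ps_disabled) {
                        mvm->ps_disabled = ps_disabled;
                        iwl_mvm_power_update_device(mvm);
                }
        }
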