Lines Matching refs:mvm
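The listing below cross-references every line of the station-management code that touches the mvm device context, giving the source line, the code, and the enclosing function. One pattern recurs throughout it: mvm->fw_id_to_mac_id[] is read through RCU accessors, and any "protected" access relies on mvm->mutex being held (hence the repeated lockdep_assert_held()/lockdep_is_held() annotations). The condensed C sketch below illustrates that lookup pattern using only names that appear in the listed lines; it is not the driver's verbatim code — the included driver header name, the exact control flow, and the IWL_MVM_INVALID_STA return value are assumptions for illustration.

#include <linux/lockdep.h>
#include <linux/rcupdate.h>

#include "mvm.h"	/* assumed driver-internal header providing struct iwl_mvm */

/*
 * Condensed sketch of the lookup pattern visible in the listing
 * (see the lines listed under iwl_mvm_find_free_sta_id()); not verbatim
 * driver code.
 */
static int sketch_find_free_sta_id(struct iwl_mvm *mvm)
{
	int sta_id;

	/* Writers of fw_id_to_mac_id[] run with mvm->mutex held. */
	lockdep_assert_held(&mvm->mutex);

	for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
		/*
		 * Holding the mutex satisfies the lockdep condition, so the
		 * "protected" (non-RCU-read-side) dereference is legal here.
		 */
		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;	/* first unused firmware station slot */
	}

	return IWL_MVM_INVALID_STA;	/* assumed "no free slot" sentinel */
}

Readers that do not hold the mutex use plain rcu_dereference() instead, as the rcu_dereference(mvm->fw_id_to_mac_id[...]) entries further down the listing show.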

18 static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)  in iwl_mvm_add_sta_cmd_size()  argument
20 if (iwl_mvm_has_new_rx_api(mvm) || in iwl_mvm_add_sta_cmd_size()
21 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) in iwl_mvm_add_sta_cmd_size()
27 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, in iwl_mvm_find_free_sta_id() argument
34 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); in iwl_mvm_find_free_sta_id()
36 lockdep_assert_held(&mvm->mutex); in iwl_mvm_find_free_sta_id()
43 for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) { in iwl_mvm_find_free_sta_id()
47 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], in iwl_mvm_find_free_sta_id()
48 lockdep_is_held(&mvm->mutex))) in iwl_mvm_find_free_sta_id()
55 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, in iwl_mvm_sta_send_to_fw() argument
72 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) in iwl_mvm_sta_send_to_fw()
78 if (!iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_sta_send_to_fw()
203 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, in iwl_mvm_sta_send_to_fw()
204 iwl_mvm_add_sta_cmd_size(mvm), in iwl_mvm_sta_send_to_fw()
211 IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n"); in iwl_mvm_sta_send_to_fw()
215 IWL_ERR(mvm, "ADD_STA failed\n"); in iwl_mvm_sta_send_to_fw()
249 sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]); in iwl_mvm_rx_agg_session_expired()
270 static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, in iwl_mvm_invalidate_sta_queue() argument
280 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_invalidate_sta_queue()
283 sta_id = mvm->queue_info[queue].ra_sta_id; in iwl_mvm_invalidate_sta_queue()
287 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_invalidate_sta_queue()
313 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, in iwl_mvm_invalidate_sta_queue()
314 iwl_mvm_add_sta_cmd_size(mvm), in iwl_mvm_invalidate_sta_queue()
318 static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, in iwl_mvm_disable_txq() argument
328 lockdep_assert_held(&mvm->mutex); in iwl_mvm_disable_txq()
330 if (iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_disable_txq()
331 if (mvm->sta_remove_requires_queue_remove) { in iwl_mvm_disable_txq()
339 ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, in iwl_mvm_disable_txq()
346 iwl_trans_txq_free(mvm->trans, queue); in iwl_mvm_disable_txq()
352 if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) in iwl_mvm_disable_txq()
355 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); in iwl_mvm_disable_txq()
357 cmd.action = mvm->queue_info[queue].tid_bitmap ? in iwl_mvm_disable_txq()
360 mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; in iwl_mvm_disable_txq()
362 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_disable_txq()
365 mvm->queue_info[queue].tid_bitmap); in iwl_mvm_disable_txq()
371 cmd.sta_id = mvm->queue_info[queue].ra_sta_id; in iwl_mvm_disable_txq()
372 cmd.tid = mvm->queue_info[queue].txq_tid; in iwl_mvm_disable_txq()
375 WARN(mvm->queue_info[queue].tid_bitmap, in iwl_mvm_disable_txq()
377 queue, mvm->queue_info[queue].tid_bitmap); in iwl_mvm_disable_txq()
380 mvm->queue_info[queue].tid_bitmap = 0; in iwl_mvm_disable_txq()
391 mvm->queue_info[queue].reserved = false; in iwl_mvm_disable_txq()
393 iwl_trans_txq_disable(mvm->trans, queue, false); in iwl_mvm_disable_txq()
394 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, in iwl_mvm_disable_txq()
398 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", in iwl_mvm_disable_txq()
403 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) in iwl_mvm_get_queue_agg_tids() argument
412 lockdep_assert_held(&mvm->mutex); in iwl_mvm_get_queue_agg_tids()
414 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_get_queue_agg_tids()
417 sta_id = mvm->queue_info[queue].ra_sta_id; in iwl_mvm_get_queue_agg_tids()
418 tid_bitmap = mvm->queue_info[queue].tid_bitmap; in iwl_mvm_get_queue_agg_tids()
420 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], in iwl_mvm_get_queue_agg_tids()
421 lockdep_is_held(&mvm->mutex)); in iwl_mvm_get_queue_agg_tids()
443 static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) in iwl_mvm_remove_sta_queue_marking() argument
452 lockdep_assert_held(&mvm->mutex); in iwl_mvm_remove_sta_queue_marking()
454 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_remove_sta_queue_marking()
457 sta_id = mvm->queue_info[queue].ra_sta_id; in iwl_mvm_remove_sta_queue_marking()
458 tid_bitmap = mvm->queue_info[queue].tid_bitmap; in iwl_mvm_remove_sta_queue_marking()
462 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_remove_sta_queue_marking()
503 static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, in iwl_mvm_free_inactive_queue() argument
514 lockdep_assert_held(&mvm->mutex); in iwl_mvm_free_inactive_queue()
516 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_free_inactive_queue()
519 sta_id = mvm->queue_info[queue].ra_sta_id; in iwl_mvm_free_inactive_queue()
520 tid = mvm->queue_info[queue].txq_tid; in iwl_mvm_free_inactive_queue()
524 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); in iwl_mvm_free_inactive_queue()
528 disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); in iwl_mvm_free_inactive_queue()
531 iwl_mvm_invalidate_sta_queue(mvm, queue, in iwl_mvm_free_inactive_queue()
534 ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid); in iwl_mvm_free_inactive_queue()
536 IWL_ERR(mvm, in iwl_mvm_free_inactive_queue()
545 iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true); in iwl_mvm_free_inactive_queue()
550 static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, in iwl_mvm_get_shared_queue() argument
561 lockdep_assert_held(&mvm->mutex); in iwl_mvm_get_shared_queue()
563 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_get_shared_queue()
575 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i; in iwl_mvm_get_shared_queue()
608 if (!iwl_mvm_is_dqa_data_queue(mvm, queue) && in iwl_mvm_get_shared_queue()
609 !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) && in iwl_mvm_get_shared_queue()
611 IWL_ERR(mvm, "No DATA queues available to share\n"); in iwl_mvm_get_shared_queue()
619 static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, in iwl_mvm_reconfig_scd() argument
635 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_reconfig_scd()
638 if (WARN(mvm->queue_info[queue].tid_bitmap == 0, in iwl_mvm_reconfig_scd()
642 IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue); in iwl_mvm_reconfig_scd()
644 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); in iwl_mvm_reconfig_scd()
657 static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid, in iwl_mvm_redirect_queue() argument
668 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_redirect_queue()
679 if (ac <= mvm->queue_info[queue].mac80211_ac && !force) { in iwl_mvm_redirect_queue()
680 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_redirect_queue()
686 cmd.sta_id = mvm->queue_info[queue].ra_sta_id; in iwl_mvm_redirect_queue()
687 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; in iwl_mvm_redirect_queue()
688 cmd.tid = mvm->queue_info[queue].txq_tid; in iwl_mvm_redirect_queue()
689 shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1; in iwl_mvm_redirect_queue()
691 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", in iwl_mvm_redirect_queue()
697 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); in iwl_mvm_redirect_queue()
699 IWL_ERR(mvm, "Error draining queue %d before reconfig\n", in iwl_mvm_redirect_queue()
706 iwl_trans_txq_disable(mvm->trans, queue, false); in iwl_mvm_redirect_queue()
707 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); in iwl_mvm_redirect_queue()
709 IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue, in iwl_mvm_redirect_queue()
713 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); in iwl_mvm_redirect_queue()
716 mvm->queue_info[queue].txq_tid = tid; in iwl_mvm_redirect_queue()
721 iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac], in iwl_mvm_redirect_queue()
725 mvm->queue_info[queue].mac80211_ac = ac; in iwl_mvm_redirect_queue()
734 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); in iwl_mvm_redirect_queue()
743 static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, in iwl_mvm_find_free_queue() argument
748 lockdep_assert_held(&mvm->mutex); in iwl_mvm_find_free_queue()
750 if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues, in iwl_mvm_find_free_queue()
752 mvm->trans->trans_cfg->base_params->num_of_queues)) in iwl_mvm_find_free_queue()
753 maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1; in iwl_mvm_find_free_queue()
756 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_find_free_queue()
761 if (mvm->queue_info[i].tid_bitmap == 0 && in iwl_mvm_find_free_queue()
762 mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) in iwl_mvm_find_free_queue()
768 static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, in iwl_mvm_tvqm_enable_txq() argument
776 mvm->trans->cfg->min_txq_size); in iwl_mvm_tvqm_enable_txq()
781 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_tvqm_enable_txq()
797 size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16)); in iwl_mvm_tvqm_enable_txq()
803 queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id), in iwl_mvm_tvqm_enable_txq()
807 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_tvqm_enable_txq()
816 IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", in iwl_mvm_tvqm_enable_txq()
822 static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, in iwl_mvm_sta_alloc_queue_tvqm() argument
830 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); in iwl_mvm_sta_alloc_queue_tvqm()
833 lockdep_assert_held(&mvm->mutex); in iwl_mvm_sta_alloc_queue_tvqm()
835 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_sta_alloc_queue_tvqm()
838 queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout); in iwl_mvm_sta_alloc_queue_tvqm()
843 mvm->tvqm_info[queue].txq_tid = tid; in iwl_mvm_sta_alloc_queue_tvqm()
844 mvm->tvqm_info[queue].sta_id = mvmsta->sta_id; in iwl_mvm_sta_alloc_queue_tvqm()
846 IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue); in iwl_mvm_sta_alloc_queue_tvqm()
855 static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, in iwl_mvm_update_txq_mapping() argument
862 if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { in iwl_mvm_update_txq_mapping()
863 IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", in iwl_mvm_update_txq_mapping()
869 if (mvm->queue_info[queue].tid_bitmap) in iwl_mvm_update_txq_mapping()
872 mvm->queue_info[queue].tid_bitmap |= BIT(tid); in iwl_mvm_update_txq_mapping()
873 mvm->queue_info[queue].ra_sta_id = sta_id; in iwl_mvm_update_txq_mapping()
877 mvm->queue_info[queue].mac80211_ac = in iwl_mvm_update_txq_mapping()
880 mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; in iwl_mvm_update_txq_mapping()
882 mvm->queue_info[queue].txq_tid = tid; in iwl_mvm_update_txq_mapping()
892 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_update_txq_mapping()
894 queue, mvm->queue_info[queue].tid_bitmap); in iwl_mvm_update_txq_mapping()
899 static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, in iwl_mvm_enable_txq() argument
916 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_enable_txq()
920 if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid)) in iwl_mvm_enable_txq()
923 inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, in iwl_mvm_enable_txq()
928 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), in iwl_mvm_enable_txq()
934 static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue) in iwl_mvm_change_queue_tid() argument
944 lockdep_assert_held(&mvm->mutex); in iwl_mvm_change_queue_tid()
946 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_change_queue_tid()
949 tid_bitmap = mvm->queue_info[queue].tid_bitmap; in iwl_mvm_change_queue_tid()
959 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); in iwl_mvm_change_queue_tid()
961 IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n", in iwl_mvm_change_queue_tid()
966 mvm->queue_info[queue].txq_tid = tid; in iwl_mvm_change_queue_tid()
967 IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", in iwl_mvm_change_queue_tid()
971 static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) in iwl_mvm_unshare_queue() argument
983 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_unshare_queue()
986 lockdep_assert_held(&mvm->mutex); in iwl_mvm_unshare_queue()
988 sta_id = mvm->queue_info[queue].ra_sta_id; in iwl_mvm_unshare_queue()
989 tid_bitmap = mvm->queue_info[queue].tid_bitmap; in iwl_mvm_unshare_queue()
994 IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n", in iwl_mvm_unshare_queue()
999 IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue, in iwl_mvm_unshare_queue()
1002 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], in iwl_mvm_unshare_queue()
1003 lockdep_is_held(&mvm->mutex)); in iwl_mvm_unshare_queue()
1009 wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); in iwl_mvm_unshare_queue()
1013 ret = iwl_mvm_redirect_queue(mvm, queue, tid, in iwl_mvm_unshare_queue()
1018 IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue); in iwl_mvm_unshare_queue()
1035 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, in iwl_mvm_unshare_queue()
1036 iwl_mvm_add_sta_cmd_size(mvm), &cmd); in iwl_mvm_unshare_queue()
1038 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_unshare_queue()
1043 iwl_trans_txq_set_shared_mode(mvm->trans, queue, false); in iwl_mvm_unshare_queue()
1047 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; in iwl_mvm_unshare_queue()
1057 static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, in iwl_mvm_remove_inactive_tids() argument
1066 lockdep_assert_held(&mvm->mutex); in iwl_mvm_remove_inactive_tids()
1068 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_remove_inactive_tids()
1074 if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid])) in iwl_mvm_remove_inactive_tids()
1083 if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { in iwl_mvm_remove_inactive_tids()
1084 IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue); in iwl_mvm_remove_inactive_tids()
1096 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); in iwl_mvm_remove_inactive_tids()
1098 q_tid_bitmap = mvm->queue_info[queue].tid_bitmap; in iwl_mvm_remove_inactive_tids()
1111 if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid))) in iwl_mvm_remove_inactive_tids()
1114 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_remove_inactive_tids()
1119 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_remove_inactive_tids()
1121 mvm->queue_info[queue].tid_bitmap); in iwl_mvm_remove_inactive_tids()
1127 tid_bitmap = mvm->queue_info[queue].tid_bitmap; in iwl_mvm_remove_inactive_tids()
1130 if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 && in iwl_mvm_remove_inactive_tids()
1131 mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { in iwl_mvm_remove_inactive_tids()
1132 IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", in iwl_mvm_remove_inactive_tids()
1149 static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) in iwl_mvm_inactivity_check() argument
1157 lockdep_assert_held(&mvm->mutex); in iwl_mvm_inactivity_check()
1159 if (iwl_mvm_has_new_tx_api(mvm)) in iwl_mvm_inactivity_check()
1175 queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; in iwl_mvm_inactivity_check()
1180 if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY && in iwl_mvm_inactivity_check()
1181 mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) in iwl_mvm_inactivity_check()
1187 if (time_after(mvm->queue_info[i].last_frame_time[tid] + in iwl_mvm_inactivity_check()
1203 sta_id = mvm->queue_info[i].ra_sta_id; in iwl_mvm_inactivity_check()
1204 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_inactivity_check()
1217 ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, in iwl_mvm_inactivity_check()
1232 iwl_mvm_unshare_queue(mvm, i); in iwl_mvm_inactivity_check()
1234 iwl_mvm_change_queue_tid(mvm, i); in iwl_mvm_inactivity_check()
1239 ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, in iwl_mvm_inactivity_check()
1248 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, in iwl_mvm_sta_alloc_queue() argument
1253 .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), in iwl_mvm_sta_alloc_queue()
1259 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); in iwl_mvm_sta_alloc_queue()
1269 lockdep_assert_held(&mvm->mutex); in iwl_mvm_sta_alloc_queue()
1271 if (iwl_mvm_has_new_tx_api(mvm)) in iwl_mvm_sta_alloc_queue()
1272 return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); in iwl_mvm_sta_alloc_queue()
1280 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, in iwl_mvm_sta_alloc_queue()
1284 IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", in iwl_mvm_sta_alloc_queue()
1291 (mvm->queue_info[mvmsta->reserved_queue].status == in iwl_mvm_sta_alloc_queue()
1294 mvm->queue_info[queue].reserved = true; in iwl_mvm_sta_alloc_queue()
1295 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); in iwl_mvm_sta_alloc_queue()
1299 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, in iwl_mvm_sta_alloc_queue()
1304 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); in iwl_mvm_sta_alloc_queue()
1309 queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac); in iwl_mvm_sta_alloc_queue()
1312 mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED; in iwl_mvm_sta_alloc_queue()
1323 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; in iwl_mvm_sta_alloc_queue()
1327 IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", in iwl_mvm_sta_alloc_queue()
1341 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_sta_alloc_queue()
1348 disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue); in iwl_mvm_sta_alloc_queue()
1351 IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", in iwl_mvm_sta_alloc_queue()
1353 iwl_mvm_invalidate_sta_queue(mvm, queue, in iwl_mvm_sta_alloc_queue()
1358 inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); in iwl_mvm_sta_alloc_queue()
1367 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); in iwl_mvm_sta_alloc_queue()
1388 ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); in iwl_mvm_sta_alloc_queue()
1394 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); in iwl_mvm_sta_alloc_queue()
1400 ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn, in iwl_mvm_sta_alloc_queue()
1411 iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid); in iwl_mvm_sta_alloc_queue()
1418 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, in iwl_mvm_add_new_dqa_stream_wk() local
1421 mutex_lock(&mvm->mutex); in iwl_mvm_add_new_dqa_stream_wk()
1423 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); in iwl_mvm_add_new_dqa_stream_wk()
1425 while (!list_empty(&mvm->add_stream_txqs)) { in iwl_mvm_add_new_dqa_stream_wk()
1430 mvmtxq = list_first_entry(&mvm->add_stream_txqs, in iwl_mvm_add_new_dqa_stream_wk()
1445 if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) { in iwl_mvm_add_new_dqa_stream_wk()
1452 iwl_mvm_mac_itxq_xmit(mvm->hw, txq); in iwl_mvm_add_new_dqa_stream_wk()
1456 mutex_unlock(&mvm->mutex); in iwl_mvm_add_new_dqa_stream_wk()
1459 static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, in iwl_mvm_reserve_sta_stream() argument
1467 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_reserve_sta_stream()
1471 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); in iwl_mvm_reserve_sta_stream()
1475 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap && in iwl_mvm_reserve_sta_stream()
1476 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == in iwl_mvm_reserve_sta_stream()
1480 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, in iwl_mvm_reserve_sta_stream()
1485 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); in iwl_mvm_reserve_sta_stream()
1487 IWL_ERR(mvm, "No available queues for new station\n"); in iwl_mvm_reserve_sta_stream()
1491 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; in iwl_mvm_reserve_sta_stream()
1495 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", in iwl_mvm_reserve_sta_stream()
1508 static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, in iwl_mvm_realloc_queues_after_restart() argument
1513 iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); in iwl_mvm_realloc_queues_after_restart()
1522 mvm->queue_info[mvm_sta->reserved_queue].status = in iwl_mvm_realloc_queues_after_restart()
1535 if (iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_realloc_queues_after_restart()
1536 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_realloc_queues_after_restart()
1539 txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id, in iwl_mvm_realloc_queues_after_restart()
1561 cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); in iwl_mvm_realloc_queues_after_restart()
1566 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_realloc_queues_after_restart()
1570 iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg); in iwl_mvm_realloc_queues_after_restart()
1571 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; in iwl_mvm_realloc_queues_after_restart()
1576 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, in iwl_mvm_add_int_sta_common() argument
1585 lockdep_assert_held(&mvm->mutex); in iwl_mvm_add_int_sta_common()
1590 if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 && in iwl_mvm_add_int_sta_common()
1597 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) in iwl_mvm_add_int_sta_common()
1600 if (!iwl_mvm_has_new_tx_api(mvm)) in iwl_mvm_add_int_sta_common()
1607 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, in iwl_mvm_add_int_sta_common()
1608 iwl_mvm_add_sta_cmd_size(mvm), in iwl_mvm_add_int_sta_common()
1615 IWL_DEBUG_INFO(mvm, "Internal station added.\n"); in iwl_mvm_add_int_sta_common()
1619 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", in iwl_mvm_add_int_sta_common()
1626 int iwl_mvm_add_sta(struct iwl_mvm *mvm, in iwl_mvm_add_sta() argument
1637 lockdep_assert_held(&mvm->mutex); in iwl_mvm_add_sta()
1639 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) in iwl_mvm_add_sta()
1640 sta_id = iwl_mvm_find_free_sta_id(mvm, in iwl_mvm_add_sta()
1651 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { in iwl_mvm_add_sta()
1661 ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr, in iwl_mvm_add_sta()
1666 iwl_mvm_realloc_queues_after_restart(mvm, sta); in iwl_mvm_add_sta()
1668 sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES; in iwl_mvm_add_sta()
1676 if (!mvm->trans->trans_cfg->gen2) in iwl_mvm_add_sta()
1712 if (iwl_mvm_has_new_rx_api(mvm) && in iwl_mvm_add_sta()
1713 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { in iwl_mvm_add_sta()
1716 dup_data = kcalloc(mvm->trans->num_rx_queues, in iwl_mvm_add_sta()
1729 for (q = 0; q < mvm->trans->num_rx_queues; q++) in iwl_mvm_add_sta()
1735 if (!iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_add_sta()
1736 ret = iwl_mvm_reserve_sta_stream(mvm, sta, in iwl_mvm_add_sta()
1746 if (iwl_mvm_has_tlc_offload(mvm)) in iwl_mvm_add_sta()
1747 iwl_mvm_rs_add_sta(mvm, mvm_sta); in iwl_mvm_add_sta()
1751 iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); in iwl_mvm_add_sta()
1754 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags); in iwl_mvm_add_sta()
1767 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); in iwl_mvm_add_sta()
1775 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, in iwl_mvm_drain_sta() argument
1782 lockdep_assert_held(&mvm->mutex); in iwl_mvm_drain_sta()
1791 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, in iwl_mvm_drain_sta()
1792 iwl_mvm_add_sta_cmd_size(mvm), in iwl_mvm_drain_sta()
1799 IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", in iwl_mvm_drain_sta()
1804 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n", in iwl_mvm_drain_sta()
1817 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) in iwl_mvm_rm_sta_common() argument
1825 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], in iwl_mvm_rm_sta_common()
1826 lockdep_is_held(&mvm->mutex)); in iwl_mvm_rm_sta_common()
1830 IWL_ERR(mvm, "Invalid station id\n"); in iwl_mvm_rm_sta_common()
1834 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0, in iwl_mvm_rm_sta_common()
1837 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id); in iwl_mvm_rm_sta_common()
1844 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, in iwl_mvm_disable_sta_queues() argument
1851 lockdep_assert_held(&mvm->mutex); in iwl_mvm_disable_sta_queues()
1857 iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i); in iwl_mvm_disable_sta_queues()
1870 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, in iwl_mvm_wait_sta_queues_empty() argument
1886 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id); in iwl_mvm_wait_sta_queues_empty()
1894 int iwl_mvm_rm_sta(struct iwl_mvm *mvm, in iwl_mvm_rm_sta() argument
1903 lockdep_assert_held(&mvm->mutex); in iwl_mvm_rm_sta()
1905 if (iwl_mvm_has_new_rx_api(mvm)) in iwl_mvm_rm_sta()
1908 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); in iwl_mvm_rm_sta()
1913 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false); in iwl_mvm_rm_sta()
1916 if (iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_rm_sta()
1917 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); in iwl_mvm_rm_sta()
1921 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, in iwl_mvm_rm_sta()
1927 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); in iwl_mvm_rm_sta()
1929 iwl_mvm_disable_sta_queues(mvm, vif, sta); in iwl_mvm_rm_sta()
1941 status = &mvm->queue_info[reserved_txq].status; in iwl_mvm_rm_sta()
1965 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) { in iwl_mvm_rm_sta()
1966 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; in iwl_mvm_rm_sta()
1967 cancel_delayed_work(&mvm->tdls_cs.dwork); in iwl_mvm_rm_sta()
1977 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); in iwl_mvm_rm_sta()
1978 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); in iwl_mvm_rm_sta()
1983 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, in iwl_mvm_rm_sta_id() argument
1987 int ret = iwl_mvm_rm_sta_common(mvm, sta_id); in iwl_mvm_rm_sta_id()
1989 lockdep_assert_held(&mvm->mutex); in iwl_mvm_rm_sta_id()
1991 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); in iwl_mvm_rm_sta_id()
1995 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, in iwl_mvm_allocate_int_sta() argument
2000 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || in iwl_mvm_allocate_int_sta()
2002 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); in iwl_mvm_allocate_int_sta()
2011 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); in iwl_mvm_allocate_int_sta()
2015 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) in iwl_mvm_dealloc_int_sta() argument
2017 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); in iwl_mvm_dealloc_int_sta()
2022 static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue, in iwl_mvm_enable_aux_snif_queue() argument
2026 mvm->trans->trans_cfg->base_params->wd_timeout; in iwl_mvm_enable_aux_snif_queue()
2035 WARN_ON(iwl_mvm_has_new_tx_api(mvm)); in iwl_mvm_enable_aux_snif_queue()
2037 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); in iwl_mvm_enable_aux_snif_queue()
2040 static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id) in iwl_mvm_enable_aux_snif_queue_tvqm() argument
2043 mvm->trans->trans_cfg->base_params->wd_timeout; in iwl_mvm_enable_aux_snif_queue_tvqm()
2045 WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); in iwl_mvm_enable_aux_snif_queue_tvqm()
2047 return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT, in iwl_mvm_enable_aux_snif_queue_tvqm()
2051 static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx, in iwl_mvm_add_int_sta_with_queue() argument
2059 if (!iwl_mvm_has_new_tx_api(mvm)) in iwl_mvm_add_int_sta_with_queue()
2060 iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo); in iwl_mvm_add_int_sta_with_queue()
2062 ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor); in iwl_mvm_add_int_sta_with_queue()
2064 if (!iwl_mvm_has_new_tx_api(mvm)) in iwl_mvm_add_int_sta_with_queue()
2065 iwl_mvm_disable_txq(mvm, NULL, queue, in iwl_mvm_add_int_sta_with_queue()
2074 if (iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_add_int_sta_with_queue()
2077 txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id); in iwl_mvm_add_int_sta_with_queue()
2079 iwl_mvm_rm_sta_common(mvm, sta->sta_id); in iwl_mvm_add_int_sta_with_queue()
2089 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id) in iwl_mvm_add_aux_sta() argument
2093 lockdep_assert_held(&mvm->mutex); in iwl_mvm_add_aux_sta()
2096 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), in iwl_mvm_add_aux_sta()
2106 ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL, in iwl_mvm_add_aux_sta()
2107 &mvm->aux_sta, &mvm->aux_queue, in iwl_mvm_add_aux_sta()
2110 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); in iwl_mvm_add_aux_sta()
2117 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) in iwl_mvm_add_snif_sta() argument
2121 lockdep_assert_held(&mvm->mutex); in iwl_mvm_add_snif_sta()
2123 return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, in iwl_mvm_add_snif_sta()
2124 NULL, &mvm->snif_sta, in iwl_mvm_add_snif_sta()
2125 &mvm->snif_queue, in iwl_mvm_add_snif_sta()
2129 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) in iwl_mvm_rm_snif_sta() argument
2133 lockdep_assert_held(&mvm->mutex); in iwl_mvm_rm_snif_sta()
2135 if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) in iwl_mvm_rm_snif_sta()
2138 iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT); in iwl_mvm_rm_snif_sta()
2139 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); in iwl_mvm_rm_snif_sta()
2141 IWL_WARN(mvm, "Failed sending remove station\n"); in iwl_mvm_rm_snif_sta()
2146 int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) in iwl_mvm_rm_aux_sta() argument
2150 lockdep_assert_held(&mvm->mutex); in iwl_mvm_rm_aux_sta()
2152 if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) in iwl_mvm_rm_aux_sta()
2155 iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT); in iwl_mvm_rm_aux_sta()
2156 ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); in iwl_mvm_rm_aux_sta()
2158 IWL_WARN(mvm, "Failed sending remove station\n"); in iwl_mvm_rm_aux_sta()
2159 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); in iwl_mvm_rm_aux_sta()
2164 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) in iwl_mvm_dealloc_snif_sta() argument
2166 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); in iwl_mvm_dealloc_snif_sta()
2177 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) in iwl_mvm_send_add_bcast_sta() argument
2186 iwl_mvm_get_wd_timeout(mvm, vif, false, false); in iwl_mvm_send_add_bcast_sta()
2195 lockdep_assert_held(&mvm->mutex); in iwl_mvm_send_add_bcast_sta()
2197 if (!iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_send_add_bcast_sta()
2200 queue = mvm->probe_queue; in iwl_mvm_send_add_bcast_sta()
2202 queue = mvm->p2p_dev_queue; in iwl_mvm_send_add_bcast_sta()
2210 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); in iwl_mvm_send_add_bcast_sta()
2219 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, in iwl_mvm_send_add_bcast_sta()
2228 if (iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_send_add_bcast_sta()
2229 queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id, in iwl_mvm_send_add_bcast_sta()
2233 iwl_mvm_rm_sta_common(mvm, bsta->sta_id); in iwl_mvm_send_add_bcast_sta()
2239 mvm->probe_queue = queue; in iwl_mvm_send_add_bcast_sta()
2241 mvm->p2p_dev_queue = queue; in iwl_mvm_send_add_bcast_sta()
2247 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, in iwl_mvm_free_bcast_sta_queues() argument
2253 lockdep_assert_held(&mvm->mutex); in iwl_mvm_free_bcast_sta_queues()
2255 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true); in iwl_mvm_free_bcast_sta_queues()
2260 queueptr = &mvm->probe_queue; in iwl_mvm_free_bcast_sta_queues()
2263 queueptr = &mvm->p2p_dev_queue; in iwl_mvm_free_bcast_sta_queues()
2272 iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT); in iwl_mvm_free_bcast_sta_queues()
2273 if (iwl_mvm_has_new_tx_api(mvm)) in iwl_mvm_free_bcast_sta_queues()
2282 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) in iwl_mvm_send_rm_bcast_sta() argument
2287 lockdep_assert_held(&mvm->mutex); in iwl_mvm_send_rm_bcast_sta()
2289 iwl_mvm_free_bcast_sta_queues(mvm, vif); in iwl_mvm_send_rm_bcast_sta()
2291 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); in iwl_mvm_send_rm_bcast_sta()
2293 IWL_WARN(mvm, "Failed sending remove station\n"); in iwl_mvm_send_rm_bcast_sta()
2297 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) in iwl_mvm_alloc_bcast_sta() argument
2301 lockdep_assert_held(&mvm->mutex); in iwl_mvm_alloc_bcast_sta()
2303 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0, in iwl_mvm_alloc_bcast_sta()
2315 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) in iwl_mvm_add_p2p_bcast_sta() argument
2321 lockdep_assert_held(&mvm->mutex); in iwl_mvm_add_p2p_bcast_sta()
2323 ret = iwl_mvm_alloc_bcast_sta(mvm, vif); in iwl_mvm_add_p2p_bcast_sta()
2327 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); in iwl_mvm_add_p2p_bcast_sta()
2330 iwl_mvm_dealloc_int_sta(mvm, bsta); in iwl_mvm_add_p2p_bcast_sta()
2335 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) in iwl_mvm_dealloc_bcast_sta() argument
2339 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); in iwl_mvm_dealloc_bcast_sta()
2346 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) in iwl_mvm_rm_p2p_bcast_sta() argument
2350 lockdep_assert_held(&mvm->mutex); in iwl_mvm_rm_p2p_bcast_sta()
2352 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif); in iwl_mvm_rm_p2p_bcast_sta()
2354 iwl_mvm_dealloc_bcast_sta(mvm, vif); in iwl_mvm_rm_p2p_bcast_sta()
2367 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) in iwl_mvm_add_mcast_sta() argument
2381 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); in iwl_mvm_add_mcast_sta()
2384 lockdep_assert_held(&mvm->mutex); in iwl_mvm_add_mcast_sta()
2403 if (!iwl_mvm_has_new_tx_api(mvm) && in iwl_mvm_add_mcast_sta()
2404 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { in iwl_mvm_add_mcast_sta()
2405 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, in iwl_mvm_add_mcast_sta()
2409 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, in iwl_mvm_add_mcast_sta()
2421 if (iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_add_mcast_sta()
2422 int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id, in iwl_mvm_add_mcast_sta()
2430 } else if (!fw_has_api(&mvm->fw->ucode_capa, in iwl_mvm_add_mcast_sta()
2432 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, in iwl_mvm_add_mcast_sta()
2437 iwl_mvm_dealloc_int_sta(mvm, msta); in iwl_mvm_add_mcast_sta()
2441 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, in __iwl_mvm_remove_sta_key() argument
2449 bool new_api = fw_has_api(&mvm->fw->ucode_capa, in __iwl_mvm_remove_sta_key()
2478 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, in __iwl_mvm_remove_sta_key()
2483 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); in __iwl_mvm_remove_sta_key()
2487 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); in __iwl_mvm_remove_sta_key()
2498 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) in iwl_mvm_rm_mcast_sta() argument
2503 lockdep_assert_held(&mvm->mutex); in iwl_mvm_rm_mcast_sta()
2505 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true); in iwl_mvm_rm_mcast_sta()
2507 iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0); in iwl_mvm_rm_mcast_sta()
2509 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); in iwl_mvm_rm_mcast_sta()
2511 IWL_WARN(mvm, "Failed sending remove station\n"); in iwl_mvm_rm_mcast_sta()
2516 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) in iwl_mvm_sync_rxq_del_ba() argument
2522 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true, in iwl_mvm_sync_rxq_del_ba()
2526 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, in iwl_mvm_free_reorder() argument
2531 iwl_mvm_sync_rxq_del_ba(mvm, data->baid); in iwl_mvm_free_reorder()
2533 for (i = 0; i < mvm->trans->num_rx_queues; i++) { in iwl_mvm_free_reorder()
2569 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, in iwl_mvm_init_reorder_buffer() argument
2575 for (i = 0; i < mvm->trans->num_rx_queues; i++) { in iwl_mvm_init_reorder_buffer()
2589 reorder_buf->mvm = mvm; in iwl_mvm_init_reorder_buffer()
2597 static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm, in iwl_mvm_fw_baid_op_sta() argument
2621 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, in iwl_mvm_fw_baid_op_sta()
2622 iwl_mvm_add_sta_cmd_size(mvm), in iwl_mvm_fw_baid_op_sta()
2629 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", in iwl_mvm_fw_baid_op_sta()
2631 if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) && in iwl_mvm_fw_baid_op_sta()
2636 IWL_WARN(mvm, "RX BA Session refused by fw\n"); in iwl_mvm_fw_baid_op_sta()
2639 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", in iwl_mvm_fw_baid_op_sta()
2645 static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, in iwl_mvm_fw_baid_op_cmd() argument
2665 } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) { in iwl_mvm_fw_baid_op_cmd()
2673 ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd), in iwl_mvm_fw_baid_op_cmd()
2683 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", in iwl_mvm_fw_baid_op_cmd()
2686 if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map)) in iwl_mvm_fw_baid_op_cmd()
2692 static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, in iwl_mvm_fw_baid_op() argument
2696 if (fw_has_capa(&mvm->fw->ucode_capa, in iwl_mvm_fw_baid_op()
2698 return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start, in iwl_mvm_fw_baid_op()
2701 return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start, in iwl_mvm_fw_baid_op()
2705 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, in iwl_mvm_sta_rx_agg() argument
2711 u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID : in iwl_mvm_sta_rx_agg()
2714 lockdep_assert_held(&mvm->mutex); in iwl_mvm_sta_rx_agg()
2716 if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) { in iwl_mvm_sta_rx_agg()
2717 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n"); in iwl_mvm_sta_rx_agg()
2721 if (iwl_mvm_has_new_rx_api(mvm) && start) { in iwl_mvm_sta_rx_agg()
2748 mvm->trans->num_rx_queues * in iwl_mvm_sta_rx_agg()
2762 if (iwl_mvm_has_new_rx_api(mvm) && !start) { in iwl_mvm_sta_rx_agg()
2770 if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) in iwl_mvm_sta_rx_agg()
2771 baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size, in iwl_mvm_sta_rx_agg()
2780 mvm->rx_ba_sessions++; in iwl_mvm_sta_rx_agg()
2782 if (!iwl_mvm_has_new_rx_api(mvm)) in iwl_mvm_sta_rx_agg()
2788 baid_data->rcu_ptr = &mvm->baid_map[baid]; in iwl_mvm_sta_rx_agg()
2791 baid_data->mvm = mvm; in iwl_mvm_sta_rx_agg()
2800 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size); in iwl_mvm_sta_rx_agg()
2807 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", in iwl_mvm_sta_rx_agg()
2809 WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); in iwl_mvm_sta_rx_agg()
2810 rcu_assign_pointer(mvm->baid_map[baid], baid_data); in iwl_mvm_sta_rx_agg()
2814 if (mvm->rx_ba_sessions > 0) in iwl_mvm_sta_rx_agg()
2816 mvm->rx_ba_sessions--; in iwl_mvm_sta_rx_agg()
2817 if (!iwl_mvm_has_new_rx_api(mvm)) in iwl_mvm_sta_rx_agg()
2823 baid_data = rcu_access_pointer(mvm->baid_map[baid]); in iwl_mvm_sta_rx_agg()
2828 iwl_mvm_free_reorder(mvm, baid_data); in iwl_mvm_sta_rx_agg()
2830 RCU_INIT_POINTER(mvm->baid_map[baid], NULL); in iwl_mvm_sta_rx_agg()
2832 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); in iwl_mvm_sta_rx_agg()
2841 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, in iwl_mvm_sta_rx_agg()
2851 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, in iwl_mvm_sta_tx_agg() argument
2859 lockdep_assert_held(&mvm->mutex); in iwl_mvm_sta_tx_agg()
2872 if (!iwl_mvm_has_new_tx_api(mvm)) in iwl_mvm_sta_tx_agg()
2879 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, in iwl_mvm_sta_tx_agg()
2880 iwl_mvm_add_sta_cmd_size(mvm), in iwl_mvm_sta_tx_agg()
2890 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", in iwl_mvm_sta_tx_agg()
2921 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, in iwl_mvm_sta_tx_agg_start() argument
2935 IWL_ERR(mvm, in iwl_mvm_sta_tx_agg_start()
2941 lockdep_assert_held(&mvm->mutex); in iwl_mvm_sta_tx_agg_start()
2944 iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_sta_tx_agg_start()
2947 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); in iwl_mvm_sta_tx_agg_start()
2962 ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, in iwl_mvm_sta_tx_agg_start()
2966 IWL_ERR(mvm, "Failed to allocate agg queue\n"); in iwl_mvm_sta_tx_agg_start()
2973 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; in iwl_mvm_sta_tx_agg_start()
2976 IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n", in iwl_mvm_sta_tx_agg_start()
2980 } else if (unlikely(mvm->queue_info[txq_id].status == in iwl_mvm_sta_tx_agg_start()
2983 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_sta_tx_agg_start()
2989 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_sta_tx_agg_start()
2998 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_sta_tx_agg_start()
3008 if (mvm->trans->trans_cfg->gen2) in iwl_mvm_sta_tx_agg_start()
3025 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, in iwl_mvm_sta_tx_agg_oper() argument
3032 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); in iwl_mvm_sta_tx_agg_oper()
3049 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm))) in iwl_mvm_sta_tx_agg_oper()
3064 if (iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_sta_tx_agg_oper()
3079 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); in iwl_mvm_sta_tx_agg_oper()
3087 queue_status = mvm->queue_info[queue].status; in iwl_mvm_sta_tx_agg_oper()
3090 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) in iwl_mvm_sta_tx_agg_oper()
3102 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, in iwl_mvm_sta_tx_agg_oper()
3105 IWL_ERR(mvm, in iwl_mvm_sta_tx_agg_oper()
3110 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, in iwl_mvm_sta_tx_agg_oper()
3114 IWL_ERR(mvm, in iwl_mvm_sta_tx_agg_oper()
3121 iwl_mvm_enable_txq(mvm, sta, queue, ssn, in iwl_mvm_sta_tx_agg_oper()
3126 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); in iwl_mvm_sta_tx_agg_oper()
3132 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; in iwl_mvm_sta_tx_agg_oper()
3146 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", in iwl_mvm_sta_tx_agg_oper()
3149 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq); in iwl_mvm_sta_tx_agg_oper()
3152 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, in iwl_mvm_unreserve_agg_queue() argument
3158 lockdep_assert_held(&mvm->mutex); in iwl_mvm_unreserve_agg_queue()
3160 if (iwl_mvm_has_new_tx_api(mvm)) in iwl_mvm_unreserve_agg_queue()
3170 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) { in iwl_mvm_unreserve_agg_queue()
3171 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; in iwl_mvm_unreserve_agg_queue()
3176 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, in iwl_mvm_sta_tx_agg_stop() argument
3188 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { in iwl_mvm_sta_tx_agg_stop()
3197 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", in iwl_mvm_sta_tx_agg_stop()
3202 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); in iwl_mvm_sta_tx_agg_stop()
3208 IWL_DEBUG_TX_QUEUES(mvm, in iwl_mvm_sta_tx_agg_stop()
3218 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); in iwl_mvm_sta_tx_agg_stop()
3228 lockdep_assert_held(&mvm->mutex); in iwl_mvm_sta_tx_agg_stop()
3235 IWL_ERR(mvm, in iwl_mvm_sta_tx_agg_stop()
3238 IWL_ERR(mvm, in iwl_mvm_sta_tx_agg_stop()
3248 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, in iwl_mvm_sta_tx_agg_flush() argument
3262 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", in iwl_mvm_sta_tx_agg_flush()
3269 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); in iwl_mvm_sta_tx_agg_flush()
3272 iwl_mvm_drain_sta(mvm, mvmsta, true); in iwl_mvm_sta_tx_agg_flush()
3274 if (iwl_mvm_has_new_tx_api(mvm)) { in iwl_mvm_sta_tx_agg_flush()
3275 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id, in iwl_mvm_sta_tx_agg_flush()
3277 IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); in iwl_mvm_sta_tx_agg_flush()
3278 iwl_trans_wait_txq_empty(mvm->trans, txq_id); in iwl_mvm_sta_tx_agg_flush()
3280 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id))) in iwl_mvm_sta_tx_agg_flush()
3281 IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); in iwl_mvm_sta_tx_agg_flush()
3282 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id)); in iwl_mvm_sta_tx_agg_flush()
3285 iwl_mvm_drain_sta(mvm, mvmsta, false); in iwl_mvm_sta_tx_agg_flush()
3287 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); in iwl_mvm_sta_tx_agg_flush()
3293 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) in iwl_mvm_set_fw_key_idx() argument
3297 lockdep_assert_held(&mvm->mutex); in iwl_mvm_set_fw_key_idx()
3306 if (test_bit(i, mvm->fw_key_table)) in iwl_mvm_set_fw_key_idx()
3308 if (mvm->fw_key_deleted[i] > max) { in iwl_mvm_set_fw_key_idx()
3309 max = mvm->fw_key_deleted[i]; in iwl_mvm_set_fw_key_idx()
3320 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, in iwl_mvm_get_key_sta() argument
3338 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], in iwl_mvm_get_key_sta()
3339 lockdep_is_held(&mvm->mutex)); in iwl_mvm_get_key_sta()
3369 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, in iwl_mvm_send_sta_key() argument
3385 bool new_api = fw_has_api(&mvm->fw->ucode_capa, in iwl_mvm_send_sta_key()
3387 int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY, in iwl_mvm_send_sta_key()
3503 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size, in iwl_mvm_send_sta_key()
3506 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, in iwl_mvm_send_sta_key()
3511 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n"); in iwl_mvm_send_sta_key()
3515 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n"); in iwl_mvm_send_sta_key()
3522 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, in iwl_mvm_send_sta_igtk() argument
3537 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) && in iwl_mvm_send_sta_igtk()
3580 IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n", in iwl_mvm_send_sta_igtk()
3585 if (!iwl_mvm_has_new_rx_api(mvm)) { in iwl_mvm_send_sta_igtk()
3595 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, in iwl_mvm_send_sta_igtk()
3598 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, in iwl_mvm_send_sta_igtk()
3603 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, in iwl_mvm_get_mac_addr() argument
3615 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], in iwl_mvm_get_mac_addr()
3616 lockdep_is_held(&mvm->mutex)); in iwl_mvm_get_mac_addr()
3624 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, in __iwl_mvm_set_sta_key() argument
3648 IWL_ERR(mvm, "Failed to find station id\n"); in __iwl_mvm_set_sta_key()
3653 addr = iwl_mvm_get_mac_addr(mvm, vif, sta); in __iwl_mvm_set_sta_key()
3658 return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, in __iwl_mvm_set_sta_key()
3663 return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, in __iwl_mvm_set_sta_key()
3667 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, in iwl_mvm_set_sta_key() argument
3679 lockdep_assert_held(&mvm->mutex); in iwl_mvm_set_sta_key()
3684 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); in iwl_mvm_set_sta_key()
3686 IWL_ERR(mvm, "Failed to find station\n"); in iwl_mvm_set_sta_key()
3698 mvm->fw_id_to_mac_id[sta_id], in iwl_mvm_set_sta_key()
3699 lockdep_is_held(&mvm->mutex)); in iwl_mvm_set_sta_key()
3701 IWL_ERR(mvm, "Invalid station id\n"); in iwl_mvm_set_sta_key()
3717 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); in iwl_mvm_set_sta_key()
3733 key_offset = iwl_mvm_set_fw_key_idx(mvm); in iwl_mvm_set_sta_key()
3739 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast); in iwl_mvm_set_sta_key()
3752 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, in iwl_mvm_set_sta_key()
3755 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); in iwl_mvm_set_sta_key()
3760 __set_bit(key_offset, mvm->fw_key_table); in iwl_mvm_set_sta_key()
3763 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", in iwl_mvm_set_sta_key()
3769 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, in iwl_mvm_remove_sta_key() argument
3779 lockdep_assert_held(&mvm->mutex); in iwl_mvm_remove_sta_key()
3782 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); in iwl_mvm_remove_sta_key()
3789 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", in iwl_mvm_remove_sta_key()
3795 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); in iwl_mvm_remove_sta_key()
3797 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { in iwl_mvm_remove_sta_key()
3798 IWL_ERR(mvm, "offset %d not used in fw key table.\n", in iwl_mvm_remove_sta_key()
3805 if (mvm->fw_key_deleted[i] < U8_MAX) in iwl_mvm_remove_sta_key()
3806 mvm->fw_key_deleted[i]++; in iwl_mvm_remove_sta_key()
3808 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0; in iwl_mvm_remove_sta_key()
3811 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); in iwl_mvm_remove_sta_key()
3815 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); in iwl_mvm_remove_sta_key()
3822 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast); in iwl_mvm_remove_sta_key()
3827 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, in iwl_mvm_update_tkip_key() argument
3839 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); in iwl_mvm_update_tkip_key()
3842 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast, in iwl_mvm_update_tkip_key()
3850 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, in iwl_mvm_sta_modify_ps_wake() argument
3862 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, in iwl_mvm_sta_modify_ps_wake()
3863 iwl_mvm_add_sta_cmd_size(mvm), &cmd); in iwl_mvm_sta_modify_ps_wake()
3865 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); in iwl_mvm_sta_modify_ps_wake()
3868 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, in iwl_mvm_sta_modify_sleep_tx_count() argument
3910 n_queued = iwl_mvm_tid_queued(mvm, tid_data); in iwl_mvm_sta_modify_sleep_tx_count()
3942 iwl_trans_block_txq_ptrs(mvm->trans, true); in iwl_mvm_sta_modify_sleep_tx_count()
3944 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, in iwl_mvm_sta_modify_sleep_tx_count()
3946 iwl_mvm_add_sta_cmd_size(mvm), &cmd); in iwl_mvm_sta_modify_sleep_tx_count()
3948 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); in iwl_mvm_sta_modify_sleep_tx_count()
3951 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, in iwl_mvm_rx_eosp_notif() argument
3959 if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) in iwl_mvm_rx_eosp_notif()
3963 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_rx_eosp_notif()
3969 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, in iwl_mvm_sta_modify_disable_tx() argument
3981 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, in iwl_mvm_sta_modify_disable_tx()
3982 iwl_mvm_add_sta_cmd_size(mvm), &cmd); in iwl_mvm_sta_modify_disable_tx()
3984 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); in iwl_mvm_sta_modify_disable_tx()
3987 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, in iwl_mvm_sta_modify_disable_tx_ap() argument
4006 if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS)) in iwl_mvm_sta_modify_disable_tx_ap()
4007 ieee80211_sta_block_awake(mvm->hw, sta, disable); in iwl_mvm_sta_modify_disable_tx_ap()
4009 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable); in iwl_mvm_sta_modify_disable_tx_ap()
4014 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm, in iwl_mvm_int_sta_modify_disable_tx() argument
4029 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, in iwl_mvm_int_sta_modify_disable_tx()
4030 iwl_mvm_add_sta_cmd_size(mvm), &cmd); in iwl_mvm_int_sta_modify_disable_tx()
4032 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); in iwl_mvm_int_sta_modify_disable_tx()
4035 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, in iwl_mvm_modify_all_sta_disable_tx() argument
4046 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { in iwl_mvm_modify_all_sta_disable_tx()
4047 sta = rcu_dereference(mvm->fw_id_to_mac_id[i]); in iwl_mvm_modify_all_sta_disable_tx()
4056 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable); in iwl_mvm_modify_all_sta_disable_tx()
4061 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) in iwl_mvm_modify_all_sta_disable_tx()
4066 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, in iwl_mvm_modify_all_sta_disable_tx()
4074 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, in iwl_mvm_modify_all_sta_disable_tx()
4078 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) in iwl_mvm_csa_client_absent() argument
4085 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); in iwl_mvm_csa_client_absent()
4088 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); in iwl_mvm_csa_client_absent()
4093 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data) in iwl_mvm_tid_queued() argument
4101 if (mvm->trans->trans_cfg->gen2) in iwl_mvm_tid_queued()
4107 int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, in iwl_mvm_add_pasn_sta() argument
4116 ret = iwl_mvm_allocate_int_sta(mvm, sta, 0, in iwl_mvm_add_pasn_sta()
4122 ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, in iwl_mvm_add_pasn_sta()
4138 ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false, in iwl_mvm_add_pasn_sta()
4143 iwl_mvm_dealloc_int_sta(mvm, sta); in iwl_mvm_add_pasn_sta()
4147 void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, in iwl_mvm_cancel_channel_switch() argument
4156 ret = iwl_mvm_send_cmd_pdu(mvm, in iwl_mvm_cancel_channel_switch()
4162 IWL_ERR(mvm, "Failed to cancel the channel switch\n"); in iwl_mvm_cancel_channel_switch()