/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/ctype.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "wmi-tlv.h"
#include "mac.h"
#include "testmode.h"
#include "wmi-ops.h"
#include "p2p.h"
#include "hw.h"
#include "hif.h"
#include "txrx.h"

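/*
 * Payload value and wait timeout used when an ECHO command is sent as a
 * barrier to flush previously queued WMI commands, and the timeout for
 * the DFS radar-found confirmation from firmware.
 */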
#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
#define ATH10K_WMI_DFS_CONF_TIMEOUT_HZ (HZ / 6)

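/*
 * Per-firmware-branch WMI command maps.  Each supported firmware ABI
 * (main, 10.x, 10.2.4, 10.4) numbers its WMI commands differently, so
 * the tables below translate the driver's abstract command IDs into the
 * IDs understood by a particular ABI.  Commands that have no equivalent
 * in an ABI are marked WMI_CMD_UNSUPPORTED and are refused by the
 * command send path.
 */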
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
	.init_cmdid = WMI_INIT_CMDID,
	.start_scan_cmdid = WMI_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
		WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
	.echo_cmdid = WMI_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.X WMI cmd track */
static struct wmi_cmd_map wmi_10x_cmd_map = {
	.init_cmdid = WMI_10X_INIT_CMDID,
	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10X_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.2.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
	.init_cmdid = WMI_10_2_INIT_CMDID,
	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_2_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
	.pdev_enable_adaptive_cca_cmdid = WMI_10_2_SET_CCA_PARAMS,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid =
		WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_4_cmd_map = {
	.init_cmdid = WMI_10_4_INIT_CMDID,
	.start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_4_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
	.vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
	.adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
	.vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
	.vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
	.wlan_peer_caching_add_peer_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
	.wlan_peer_caching_evict_peer_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
	.wlan_peer_caching_restore_peer_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
	.wlan_peer_caching_print_all_peers_info_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
	.peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
	.peer_add_proxy_sta_entry_cmdid =
		WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
	.rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
	.oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
	.nan_cmdid = WMI_10_4_NAN_CMDID,
	.vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
	.qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
	.pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
	.pdev_smart_ant_set_rx_antenna_cmdid =
		WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
	.peer_smart_ant_set_tx_antenna_cmdid =
		WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
	.peer_smart_ant_set_train_info_cmdid =
		WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
	.peer_smart_ant_set_node_config_ops_cmdid =
		WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
	.pdev_set_antenna_switch_table_cmdid =
		WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
	.pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
	.pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
	.pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
	.pdev_ratepwr_chainmsk_table_cmdid =
		WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
	.pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
	.tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
	.fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
	.vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
	.peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
	.pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
	.pdev_get_ani_ofdm_config_cmdid =
		WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
	.pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
	.pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
	.pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
	.pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
	.vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
	.vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
	.vdev_filter_neighbor_rx_packets_cmdid =
		WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
	.mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
	.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
	.pdev_bss_chan_info_request_cmdid =
		WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
	.ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
	.vdev_set_ie_cmdid = WMI_10_4_VDEV_SET_IE_CMDID,
	.set_lteu_config_cmdid = WMI_10_4_SET_LTEU_CONFIG_CMDID,
	.atf_ssid_grouping_request_cmdid =
		WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID,
	.peer_atf_ext_request_cmdid = WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID,
	.set_periodic_channel_stats_cfg_cmdid =
		WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG,
	.peer_bwf_request_cmdid = WMI_10_4_PEER_BWF_REQUEST_CMDID,
	.btcoex_cfg_cmdid = WMI_10_4_BTCOEX_CFG_CMDID,
	.peer_tx_mu_txmit_count_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID,
	.peer_tx_mu_txmit_rstcnt_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID,
	.peer_gid_userpos_list_cmdid = WMI_10_4_PEER_GID_USERPOS_LIST_CMDID,
	.pdev_check_cal_version_cmdid = WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID,
	.coex_version_cfg_cmid = WMI_10_4_COEX_VERSION_CFG_CMID,
	.pdev_get_rx_filter_cmdid = WMI_10_4_PDEV_GET_RX_FILTER_CMDID,
	.pdev_extended_nss_cfg_cmdid = WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID,
	.vdev_set_scan_nac_rssi_cmdid = WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID,
	.prog_gpio_band_select_cmdid = WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID,
	.config_smart_logging_cmdid = WMI_10_4_CONFIG_SMART_LOGGING_CMDID,
	.debug_fatal_condition_cmdid = WMI_10_4_DEBUG_FATAL_CONDITION_CMDID,
	.get_tsf_timer_cmdid = WMI_10_4_GET_TSF_TIMER_CMDID,
	.pdev_get_tpc_table_cmdid = WMI_10_4_PDEV_GET_TPC_TABLE_CMDID,
	.vdev_sifs_trigger_time_cmdid = WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID,
	.pdev_wds_entry_list_cmdid = WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID,
	.tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID,
	.tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID,
	.tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
	.radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
};

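/*
 * VDEV (virtual interface) parameter maps.  As with the command maps
 * above, each table translates the driver's generic per-vdev parameter
 * IDs into the numbering used by one firmware ABI; parameters an ABI
 * does not implement are marked WMI_VDEV_PARAM_UNSUPPORTED.
 */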
/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_VDEV_PARAM_WDS,
	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_VDEV_PARAM_SGI,
	.ldpc = WMI_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_VDEV_PARAM_UNSUPPORTED,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};

/* 10.X WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};

/* 10.2.4 WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};

/* 10.4 WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
	.rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10_4_VDEV_PARAM_WDS,
	.atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10_4_VDEV_PARAM_SGI,
	.ldpc = WMI_10_4_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10_4_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_10_4_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
	.cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
	.mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
	.rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
	.vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
	.vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
	.early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
	.early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
	.early_rx_bmiss_sample_cycle =
		WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
	.early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
	.early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
	.early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
	.proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
	.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
	.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
	.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
	.inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
	.dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
};

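/*
 * PDEV (physical device) parameter map for the main firmware ABI.  As
 * with the maps above, parameters the ABI does not implement are marked
 * WMI_PDEV_PARAM_UNSUPPORTED.
 */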
static struct wmi_pdev_param_map wmi_pdev_param_map = {
	.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
	.dcs = WMI_PDEV_PARAM_DCS,
	.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
	.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
	.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1133 .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1134 .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1135 .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1136 .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1137 .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1138 .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1139 .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1140 .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1141 .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1142 .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1143 .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1144 .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1145 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1146 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1147 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1148 .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1149 };
1150
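/* 10.X WMI pdev param map; entries absent from the 10.x ABI are marked
 * WMI_PDEV_PARAM_UNSUPPORTED
 */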
1151 static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
1152 .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
1153 .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
1154 .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
1155 .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
1156 .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
1157 .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
1158 .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
1159 .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1160 .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
1161 .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
1162 .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1163 .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
1164 .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
1165 .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1166 .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
1167 .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
1168 .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
1169 .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
1170 .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
1171 .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1172 .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1173 .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
1174 .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1175 .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
1176 .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
1177 .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
1178 .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
1179 .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
1180 .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
1181 .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1182 .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1183 .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1184 .bcnflt_stats_update_period =
1185 WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1186 .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
1187 .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
1188 .dcs = WMI_10X_PDEV_PARAM_DCS,
1189 .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
1190 .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
1191 .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
1192 .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
1193 .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
1194 .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
1195 .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
1196 .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
1197 .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
1198 .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
1199 .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
1200 .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
1201 .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
1202 .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1203 .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1204 .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1205 .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1206 .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1207 .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1208 .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1209 .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1210 .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1211 .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1212 .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1213 .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1214 .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1215 .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1216 .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1217 .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1218 .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1219 .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1220 .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1221 .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1222 .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1223 .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1224 .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1225 .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1226 .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1227 .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1228 .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1229 .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1230 .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1231 .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1232 .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1233 .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1234 .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1235 .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1236 .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1237 .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1238 .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1239 .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1240 .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1241 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1242 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1243 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1244 .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1245 };
1246
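/* 10.2.4 WMI pdev param map; largely mirrors the 10.X map, but pdev_reset
 * is supported here
 */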
1247 static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
1248 .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
1249 .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
1250 .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
1251 .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
1252 .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
1253 .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
1254 .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
1255 .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1256 .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
1257 .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
1258 .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1259 .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
1260 .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
1261 .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1262 .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
1263 .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
1264 .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
1265 .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
1266 .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
1267 .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1268 .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1269 .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
1270 .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1271 .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
1272 .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
1273 .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
1274 .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
1275 .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
1276 .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
1277 .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1278 .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1279 .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1280 .bcnflt_stats_update_period =
1281 WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1282 .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
1283 .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
1284 .dcs = WMI_10X_PDEV_PARAM_DCS,
1285 .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
1286 .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
1287 .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
1288 .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
1289 .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
1290 .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
1291 .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
1292 .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
1293 .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
1294 .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
1295 .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
1296 .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
1297 .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
1298 .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1299 .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1300 .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1301 .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1302 .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1303 .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1304 .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1305 .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1306 .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1307 .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1308 .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1309 .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1310 .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1311 .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1312 .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1313 .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1314 .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1315 .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1316 .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1317 .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1318 .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1319 .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1320 .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1321 .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1322 .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1323 .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1324 .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1325 .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1326 .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1327 .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1328 .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1329 .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1330 .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1331 .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1332 .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1333 .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1334 .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1335 .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1336 .pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET,
1337 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1338 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1339 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1340 .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1341 };
1342
1343 /* firmware 10.2 specific mappings */
1344 static struct wmi_cmd_map wmi_10_2_cmd_map = {
1345 .init_cmdid = WMI_10_2_INIT_CMDID,
1346 .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
1347 .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
1348 .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
1349 .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
1350 .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
1351 .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
1352 .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
1353 .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
1354 .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
1355 .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
1356 .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
1357 .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
1358 .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
1359 .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
1360 .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
1361 .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
1362 .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
1363 .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
1364 .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
1365 .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
1366 .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
1367 .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
1368 .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
1369 .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
1370 .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
1371 .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
1372 .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
1373 .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
1374 .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
1375 .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
1376 .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
1377 .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
1378 .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
1379 .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
1380 .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
1381 .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
1382 .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
1383 .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
1384 .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
1385 .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
1386 .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
1387 .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
1388 .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
1389 .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
1390 .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
1391 .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
1392 .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
1393 .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
1394 .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
1395 .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
1396 .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
1397 .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
1398 .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
1399 .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
1400 .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
1401 .roam_scan_rssi_change_threshold =
1402 WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
1403 .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
1404 .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
1405 .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
1406 .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
1407 .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
1408 .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
1409 .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
1410 .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
1411 .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
1412 .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
1413 .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
1414 .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
1415 .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
1416 .wlan_profile_set_hist_intvl_cmdid =
1417 WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
1418 .wlan_profile_get_profile_data_cmdid =
1419 WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
1420 .wlan_profile_enable_profile_id_cmdid =
1421 WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
1422 .wlan_profile_list_profile_id_cmdid =
1423 WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
1424 .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
1425 .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
1426 .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
1427 .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
1428 .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
1429 .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
1430 .wow_enable_disable_wake_event_cmdid =
1431 WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
1432 .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
1433 .wow_hostwakeup_from_sleep_cmdid =
1434 WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
1435 .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
1436 .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
1437 .vdev_spectral_scan_configure_cmdid =
1438 WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
1439 .vdev_spectral_scan_enable_cmdid =
1440 WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
1441 .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
1442 .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
1443 .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
1444 .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
1445 .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
1446 .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
1447 .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
1448 .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
1449 .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
1450 .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
1451 .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
1452 .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
1453 .echo_cmdid = WMI_10_2_ECHO_CMDID,
1454 .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
1455 .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
1456 .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
1457 .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
1458 .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1459 .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1460 .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
1461 .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
1462 .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
1463 .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
1464 .pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
1465 .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
1466 .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
1467 .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
1468 .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
1469 .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
1470 .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
1471 .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
1472 .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
1473 .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
1474 .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1475 .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
1476 .nan_cmdid = WMI_CMD_UNSUPPORTED,
1477 .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
1478 .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
1479 .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
1480 .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
1481 .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
1482 .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
1483 .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
1484 .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
1485 .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
1486 .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
1487 .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
1488 .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
1489 .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
1490 .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
1491 .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
1492 .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
1493 .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
1494 .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
1495 .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
1496 .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
1497 .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
1498 .radar_found_cmdid = WMI_CMD_UNSUPPORTED,
1499 };
1500
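/* 10.4 WMI pdev param map */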
1501 static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
1502 .tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
1503 .rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
1504 .txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
1505 .txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
1506 .txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
1507 .beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
1508 .beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
1509 .resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1510 .protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
1511 .dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
1512 .non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1513 .agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
1514 .sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
1515 .ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1516 .ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
1517 .ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
1518 .ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
1519 .ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
1520 .ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
1521 .ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1522 .ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1523 .ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
1524 .ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1525 .l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
1526 .dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
1527 .pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
1528 .pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
1529 .pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1530 .pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
1531 .pdev_stats_update_period =
1532 WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1533 .vdev_stats_update_period =
1534 WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1535 .peer_stats_update_period =
1536 WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1537 .bcnflt_stats_update_period =
1538 WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1539 .pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
1540 .arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
1541 .dcs = WMI_10_4_PDEV_PARAM_DCS,
1542 .ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
1543 .ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
1544 .ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
1545 .ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
1546 .ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
1547 .dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
1548 .proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
1549 .idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
1550 .power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
1551 .fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
1552 .burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
1553 .burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
1554 .cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
1555 .aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
1556 .rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
1557 .smart_antenna_default_antenna =
1558 WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
1559 .igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
1560 .igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
1561 .antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
1562 .rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
1563 .set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
1564 .proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
1565 .set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
1566 .set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
1567 .remove_mcast2ucast_buffer =
1568 WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
1569 .peer_sta_ps_statechg_enable =
1570 WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
1571 .igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
1572 .block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
1573 .set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
1574 .set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
1575 .set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
1576 .txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
1577 .set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
1578 .set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
1579 .en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
1580 .mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
1581 .noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
1582 .noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
1583 .dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
1584 .set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
1585 .atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
1586 .atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
1587 .ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
1588 .mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
1589 .sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
1590 .signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
1591 .signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
1592 .enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
1593 .enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
1594 .cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
1595 .rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
1596 .pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
1597 .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
1598 .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
1599 .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
1600 .enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
1601 };
1602
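/* peer flag mappings for main, 10.X and 10.2 firmware branches */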
1603 static const struct wmi_peer_flags_map wmi_peer_flags_map = {
1604 .auth = WMI_PEER_AUTH,
1605 .qos = WMI_PEER_QOS,
1606 .need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY,
1607 .need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY,
1608 .apsd = WMI_PEER_APSD,
1609 .ht = WMI_PEER_HT,
1610 .bw40 = WMI_PEER_40MHZ,
1611 .stbc = WMI_PEER_STBC,
1612 .ldbc = WMI_PEER_LDPC,
1613 .dyn_mimops = WMI_PEER_DYN_MIMOPS,
1614 .static_mimops = WMI_PEER_STATIC_MIMOPS,
1615 .spatial_mux = WMI_PEER_SPATIAL_MUX,
1616 .vht = WMI_PEER_VHT,
1617 .bw80 = WMI_PEER_80MHZ,
1618 .vht_2g = WMI_PEER_VHT_2G,
1619 .pmf = WMI_PEER_PMF,
1620 .bw160 = WMI_PEER_160MHZ,
1621 };
1622
1623 static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
1624 .auth = WMI_10X_PEER_AUTH,
1625 .qos = WMI_10X_PEER_QOS,
1626 .need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY,
1627 .need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY,
1628 .apsd = WMI_10X_PEER_APSD,
1629 .ht = WMI_10X_PEER_HT,
1630 .bw40 = WMI_10X_PEER_40MHZ,
1631 .stbc = WMI_10X_PEER_STBC,
1632 .ldbc = WMI_10X_PEER_LDPC,
1633 .dyn_mimops = WMI_10X_PEER_DYN_MIMOPS,
1634 .static_mimops = WMI_10X_PEER_STATIC_MIMOPS,
1635 .spatial_mux = WMI_10X_PEER_SPATIAL_MUX,
1636 .vht = WMI_10X_PEER_VHT,
1637 .bw80 = WMI_10X_PEER_80MHZ,
1638 .bw160 = WMI_10X_PEER_160MHZ,
1639 };
1640
1641 static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
1642 .auth = WMI_10_2_PEER_AUTH,
1643 .qos = WMI_10_2_PEER_QOS,
1644 .need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY,
1645 .need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY,
1646 .apsd = WMI_10_2_PEER_APSD,
1647 .ht = WMI_10_2_PEER_HT,
1648 .bw40 = WMI_10_2_PEER_40MHZ,
1649 .stbc = WMI_10_2_PEER_STBC,
1650 .ldbc = WMI_10_2_PEER_LDPC,
1651 .dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS,
1652 .static_mimops = WMI_10_2_PEER_STATIC_MIMOPS,
1653 .spatial_mux = WMI_10_2_PEER_SPATIAL_MUX,
1654 .vht = WMI_10_2_PEER_VHT,
1655 .bw80 = WMI_10_2_PEER_80MHZ,
1656 .vht_2g = WMI_10_2_PEER_VHT_2G,
1657 .pmf = WMI_10_2_PEER_PMF,
1658 .bw160 = WMI_10_2_PEER_160MHZ,
1659 };
1660
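/* Translate a host-order wmi_channel_arg into the little-endian
 * wmi_channel layout expected by firmware. Channel flags are collected
 * locally and OR'ed into ch->flags last, since mode and flags share
 * storage in the target structure.
 */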
1661 void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
1662 const struct wmi_channel_arg *arg)
1663 {
1664 u32 flags = 0;
1665
1666 memset(ch, 0, sizeof(*ch));
1667
1668 if (arg->passive)
1669 flags |= WMI_CHAN_FLAG_PASSIVE;
1670 if (arg->allow_ibss)
1671 flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
1672 if (arg->allow_ht)
1673 flags |= WMI_CHAN_FLAG_ALLOW_HT;
1674 if (arg->allow_vht)
1675 flags |= WMI_CHAN_FLAG_ALLOW_VHT;
1676 if (arg->ht40plus)
1677 flags |= WMI_CHAN_FLAG_HT40_PLUS;
1678 if (arg->chan_radar)
1679 flags |= WMI_CHAN_FLAG_DFS;
1680
1681 ch->mhz = __cpu_to_le32(arg->freq);
1682 ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
1683 if (arg->mode == MODE_11AC_VHT80_80)
1684 ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
1685 else
1686 ch->band_center_freq2 = 0;
1687 ch->min_power = arg->min_power;
1688 ch->max_power = arg->max_power;
1689 ch->reg_power = arg->max_reg_power;
1690 ch->antenna_max = arg->max_antenna_gain;
1691 ch->max_tx_power = arg->max_power;
1692
1693 /* mode & flags share storage */
1694 ch->mode = arg->mode;
1695 ch->flags |= __cpu_to_le32(flags);
1696 }
1697
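/* Wait up to WMI_SERVICE_READY_TIMEOUT_HZ for firmware to signal that its
 * WMI services are ready.
 */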
1698 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
1699 {
1700 unsigned long time_left;
1701
1702 time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
1703 WMI_SERVICE_READY_TIMEOUT_HZ);
1704 if (!time_left)
1705 return -ETIMEDOUT;
1706 return 0;
1707 }
1708
1709 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
1710 {
1711 unsigned long time_left;
1712
1713 time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
1714 WMI_UNIFIED_READY_TIMEOUT_HZ);
1715 if (!time_left)
1716 return -ETIMEDOUT;
1717 return 0;
1718 }
1719
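/* Allocate an skb for a WMI command. The length is rounded up to a 4-byte
 * multiple, WMI_SKB_HEADROOM is reserved in front of the payload (so the
 * WMI command header can be pushed later) and the payload is zeroed.
 */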
1720 struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
1721 {
1722 struct sk_buff *skb;
1723 u32 round_len = roundup(len, 4);
1724
1725 skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
1726 if (!skb)
1727 return NULL;
1728
1729 skb_reserve(skb, WMI_SKB_HEADROOM);
1730 if (!IS_ALIGNED((unsigned long)skb->data, 4))
1731 ath10k_warn(ar, "Unaligned WMI skb\n");
1732
1733 skb_put(skb, round_len);
1734 memset(skb->data, 0, round_len);
1735
1736 return skb;
1737 }
1738
1739 static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
1740 {
1741 dev_kfree_skb(skb);
1742 }
1743
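/* Push a wmi_cmd_hdr carrying cmd_id in front of the payload and hand the
 * skb to HTC on the WMI endpoint. On failure the header is pulled back off
 * so the caller can retry with the same skb.
 */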
1744 int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
1745 u32 cmd_id)
1746 {
1747 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
1748 struct wmi_cmd_hdr *cmd_hdr;
1749 int ret;
1750 u32 cmd = 0;
1751
1752 if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
1753 return -ENOMEM;
1754
1755 cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
1756
1757 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
1758 cmd_hdr->cmd_id = __cpu_to_le32(cmd);
1759
1760 memset(skb_cb, 0, sizeof(*skb_cb));
1761 trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
1762 ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
1763
1764 if (ret)
1765 goto err_pull;
1766
1767 return 0;
1768
1769 err_pull:
1770 skb_pull(skb, sizeof(struct wmi_cmd_hdr));
1771 return ret;
1772 }
1773
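/* Send a scheduled beacon for this vif by reference (DMA address), dropping
 * ar->data_lock around the WMI call. arvif->beacon_state tracks whether the
 * beacon is scheduled, being sent or already sent.
 */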
1774 static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
1775 {
1776 struct ath10k *ar = arvif->ar;
1777 struct ath10k_skb_cb *cb;
1778 struct sk_buff *bcn;
1779 bool dtim_zero;
1780 bool deliver_cab;
1781 int ret;
1782
1783 spin_lock_bh(&ar->data_lock);
1784
1785 bcn = arvif->beacon;
1786
1787 if (!bcn)
1788 goto unlock;
1789
1790 cb = ATH10K_SKB_CB(bcn);
1791
1792 switch (arvif->beacon_state) {
1793 case ATH10K_BEACON_SENDING:
1794 case ATH10K_BEACON_SENT:
1795 break;
1796 case ATH10K_BEACON_SCHEDULED:
1797 arvif->beacon_state = ATH10K_BEACON_SENDING;
1798 spin_unlock_bh(&ar->data_lock);
1799
1800 dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO);
1801 deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB);
1802 ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
1803 arvif->vdev_id,
1804 bcn->data, bcn->len,
1805 cb->paddr,
1806 dtim_zero,
1807 deliver_cab);
1808
1809 spin_lock_bh(&ar->data_lock);
1810
1811 if (ret == 0)
1812 arvif->beacon_state = ATH10K_BEACON_SENT;
1813 else
1814 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
1815 }
1816
1817 unlock:
1818 spin_unlock_bh(&ar->data_lock);
1819 }
1820
1821 static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
1822 struct ieee80211_vif *vif)
1823 {
1824 struct ath10k_vif *arvif = (void *)vif->drv_priv;
1825
1826 ath10k_wmi_tx_beacon_nowait(arvif);
1827 }
1828
1829 static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
1830 {
1831 ieee80211_iterate_active_interfaces_atomic(ar->hw,
1832 IEEE80211_IFACE_ITER_NORMAL,
1833 ath10k_wmi_tx_beacons_iter,
1834 NULL);
1835 }
1836
1837 static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
1838 {
1839 /* try to send pending beacons first. they take priority */
1840 ath10k_wmi_tx_beacons_nowait(ar);
1841
1842 wake_up(&ar->wmi.tx_credits_wq);
1843 }
1844
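/* Sleeping WMI command send: wait for HTC tx credits, retrying while the
 * non-blocking send returns -EAGAIN, for up to 3 seconds. Pending beacons
 * are given priority on every attempt and the skb is freed on failure.
 */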
1845 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
1846 {
1847 int ret = -EOPNOTSUPP;
1848
1849 might_sleep();
1850
1851 if (cmd_id == WMI_CMD_UNSUPPORTED) {
1852 ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
1853 cmd_id);
1854 return ret;
1855 }
1856
1857 wait_event_timeout(ar->wmi.tx_credits_wq, ({
1858 /* try to send pending beacons first. they take priority */
1859 ath10k_wmi_tx_beacons_nowait(ar);
1860
1861 ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
1862
1863 if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
1864 ret = -ESHUTDOWN;
1865
1866 (ret != -EAGAIN);
1867 }), 3 * HZ);
1868
1869 if (ret)
1870 dev_kfree_skb_any(skb);
1871
1872 return ret;
1873 }
1874
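/* Build a WMI management tx command for a management frame. Protected robust
 * management frames (action/deauth/disassoc) get IEEE80211_CCMP_MIC_LEN of
 * extra buffer space and the command length is rounded up to 4 bytes.
 */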
1875 static struct sk_buff *
1876 ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
1877 {
1878 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
1879 struct ath10k_vif *arvif;
1880 struct wmi_mgmt_tx_cmd *cmd;
1881 struct ieee80211_hdr *hdr;
1882 struct sk_buff *skb;
1883 int len;
1884 u32 vdev_id;
1885 u32 buf_len = msdu->len;
1886 u16 fc;
1887
1888 hdr = (struct ieee80211_hdr *)msdu->data;
1889 fc = le16_to_cpu(hdr->frame_control);
1890
1891 if (cb->vif) {
1892 arvif = (void *)cb->vif->drv_priv;
1893 vdev_id = arvif->vdev_id;
1894 } else {
1895 vdev_id = 0;
1896 }
1897
1898 if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
1899 return ERR_PTR(-EINVAL);
1900
1901 len = sizeof(cmd->hdr) + msdu->len;
1902
1903 if ((ieee80211_is_action(hdr->frame_control) ||
1904 ieee80211_is_deauth(hdr->frame_control) ||
1905 ieee80211_is_disassoc(hdr->frame_control)) &&
1906 ieee80211_has_protected(hdr->frame_control)) {
1907 len += IEEE80211_CCMP_MIC_LEN;
1908 buf_len += IEEE80211_CCMP_MIC_LEN;
1909 }
1910
1911 len = round_up(len, 4);
1912
1913 skb = ath10k_wmi_alloc_skb(ar, len);
1914 if (!skb)
1915 return ERR_PTR(-ENOMEM);
1916
1917 cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
1918
1919 cmd->hdr.vdev_id = __cpu_to_le32(vdev_id);
1920 cmd->hdr.tx_rate = 0;
1921 cmd->hdr.tx_power = 0;
1922 cmd->hdr.buf_len = __cpu_to_le32(buf_len);
1923
1924 ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
1925 memcpy(cmd->buf, msdu->data, msdu->len);
1926
1927 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
1928 msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
1929 fc & IEEE80211_FCTL_STYPE);
1930 trace_ath10k_tx_hdr(ar, skb->data, skb->len);
1931 trace_ath10k_tx_payload(ar, skb->data, skb->len);
1932
1933 return skb;
1934 }
1935
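/* The scan event helpers below update ar->scan.state under ar->data_lock.
 * Events arriving in a state that should not produce them are only warned
 * about, as firmware may emit stray events while its scan state machine
 * recovers.
 */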
1936 static void ath10k_wmi_event_scan_started(struct ath10k *ar)
1937 {
1938 lockdep_assert_held(&ar->data_lock);
1939
1940 switch (ar->scan.state) {
1941 case ATH10K_SCAN_IDLE:
1942 case ATH10K_SCAN_RUNNING:
1943 case ATH10K_SCAN_ABORTING:
1944 ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
1945 ath10k_scan_state_str(ar->scan.state),
1946 ar->scan.state);
1947 break;
1948 case ATH10K_SCAN_STARTING:
1949 ar->scan.state = ATH10K_SCAN_RUNNING;
1950
1951 if (ar->scan.is_roc)
1952 ieee80211_ready_on_channel(ar->hw);
1953
1954 complete(&ar->scan.started);
1955 break;
1956 }
1957 }
1958
1959 static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
1960 {
1961 lockdep_assert_held(&ar->data_lock);
1962
1963 switch (ar->scan.state) {
1964 case ATH10K_SCAN_IDLE:
1965 case ATH10K_SCAN_RUNNING:
1966 case ATH10K_SCAN_ABORTING:
1967 ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
1968 ath10k_scan_state_str(ar->scan.state),
1969 ar->scan.state);
1970 break;
1971 case ATH10K_SCAN_STARTING:
1972 complete(&ar->scan.started);
1973 __ath10k_scan_finish(ar);
1974 break;
1975 }
1976 }
1977
1978 static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
1979 {
1980 lockdep_assert_held(&ar->data_lock);
1981
1982 switch (ar->scan.state) {
1983 case ATH10K_SCAN_IDLE:
1984 case ATH10K_SCAN_STARTING:
1985 /* One suspected reason scan can be completed while starting is
1986 * if firmware fails to deliver all scan events to the host,
1987 * e.g. when transport pipe is full. This has been observed
1988 * with spectral scan phyerr events starving wmi transport
1989 * pipe. In such case the "scan completed" event should be (and
1990 * is) ignored by the host as it may be just firmware's scan
1991 * state machine recovering.
1992 */
1993 ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
1994 ath10k_scan_state_str(ar->scan.state),
1995 ar->scan.state);
1996 break;
1997 case ATH10K_SCAN_RUNNING:
1998 case ATH10K_SCAN_ABORTING:
1999 __ath10k_scan_finish(ar);
2000 break;
2001 }
2002 }
2003
2004 static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
2005 {
2006 lockdep_assert_held(&ar->data_lock);
2007
2008 switch (ar->scan.state) {
2009 case ATH10K_SCAN_IDLE:
2010 case ATH10K_SCAN_STARTING:
2011 ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
2012 ath10k_scan_state_str(ar->scan.state),
2013 ar->scan.state);
2014 break;
2015 case ATH10K_SCAN_RUNNING:
2016 case ATH10K_SCAN_ABORTING:
2017 ar->scan_channel = NULL;
2018 break;
2019 }
2020 }
2021
2022 static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
2023 {
2024 lockdep_assert_held(&ar->data_lock);
2025
2026 switch (ar->scan.state) {
2027 case ATH10K_SCAN_IDLE:
2028 case ATH10K_SCAN_STARTING:
2029 ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
2030 ath10k_scan_state_str(ar->scan.state),
2031 ar->scan.state);
2032 break;
2033 case ATH10K_SCAN_RUNNING:
2034 case ATH10K_SCAN_ABORTING:
2035 ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
2036
2037 if (ar->scan.is_roc && ar->scan.roc_freq == freq)
2038 complete(&ar->scan.on_channel);
2039 break;
2040 }
2041 }
2042
2043 static const char *
2044 ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
2045 enum wmi_scan_completion_reason reason)
2046 {
2047 switch (type) {
2048 case WMI_SCAN_EVENT_STARTED:
2049 return "started";
2050 case WMI_SCAN_EVENT_COMPLETED:
2051 switch (reason) {
2052 case WMI_SCAN_REASON_COMPLETED:
2053 return "completed";
2054 case WMI_SCAN_REASON_CANCELLED:
2055 return "completed [cancelled]";
2056 case WMI_SCAN_REASON_PREEMPTED:
2057 return "completed [preempted]";
2058 case WMI_SCAN_REASON_TIMEDOUT:
2059 return "completed [timedout]";
2060 case WMI_SCAN_REASON_INTERNAL_FAILURE:
2061 return "completed [internal err]";
2062 case WMI_SCAN_REASON_MAX:
2063 break;
2064 }
2065 return "completed [unknown]";
2066 case WMI_SCAN_EVENT_BSS_CHANNEL:
2067 return "bss channel";
2068 case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
2069 return "foreign channel";
2070 case WMI_SCAN_EVENT_DEQUEUED:
2071 return "dequeued";
2072 case WMI_SCAN_EVENT_PREEMPTED:
2073 return "preempted";
2074 case WMI_SCAN_EVENT_START_FAILED:
2075 return "start failed";
2076 case WMI_SCAN_EVENT_RESTARTED:
2077 return "restarted";
2078 case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
2079 return "foreign channel exit";
2080 default:
2081 return "unknown";
2082 }
2083 }
2084
2085 static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
2086 struct wmi_scan_ev_arg *arg)
2087 {
2088 struct wmi_scan_event *ev = (void *)skb->data;
2089
2090 if (skb->len < sizeof(*ev))
2091 return -EPROTO;
2092
2093 skb_pull(skb, sizeof(*ev));
2094 arg->event_type = ev->event_type;
2095 arg->reason = ev->reason;
2096 arg->channel_freq = ev->channel_freq;
2097 arg->scan_req_id = ev->scan_req_id;
2098 arg->scan_id = ev->scan_id;
2099 arg->vdev_id = ev->vdev_id;
2100
2101 return 0;
2102 }
2103
2104 int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
2105 {
2106 struct wmi_scan_ev_arg arg = {};
2107 enum wmi_scan_event_type event_type;
2108 enum wmi_scan_completion_reason reason;
2109 u32 freq;
2110 u32 req_id;
2111 u32 scan_id;
2112 u32 vdev_id;
2113 int ret;
2114
2115 ret = ath10k_wmi_pull_scan(ar, skb, &arg);
2116 if (ret) {
2117 ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
2118 return ret;
2119 }
2120
2121 event_type = __le32_to_cpu(arg.event_type);
2122 reason = __le32_to_cpu(arg.reason);
2123 freq = __le32_to_cpu(arg.channel_freq);
2124 req_id = __le32_to_cpu(arg.scan_req_id);
2125 scan_id = __le32_to_cpu(arg.scan_id);
2126 vdev_id = __le32_to_cpu(arg.vdev_id);
2127
2128 spin_lock_bh(&ar->data_lock);
2129
2130 ath10k_dbg(ar, ATH10K_DBG_WMI,
2131 "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
2132 ath10k_wmi_event_scan_type_str(event_type, reason),
2133 event_type, reason, freq, req_id, scan_id, vdev_id,
2134 ath10k_scan_state_str(ar->scan.state), ar->scan.state);
2135
2136 switch (event_type) {
2137 case WMI_SCAN_EVENT_STARTED:
2138 ath10k_wmi_event_scan_started(ar);
2139 break;
2140 case WMI_SCAN_EVENT_COMPLETED:
2141 ath10k_wmi_event_scan_completed(ar);
2142 break;
2143 case WMI_SCAN_EVENT_BSS_CHANNEL:
2144 ath10k_wmi_event_scan_bss_chan(ar);
2145 break;
2146 case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
2147 ath10k_wmi_event_scan_foreign_chan(ar, freq);
2148 break;
2149 case WMI_SCAN_EVENT_START_FAILED:
2150 ath10k_warn(ar, "received scan start failure event\n");
2151 ath10k_wmi_event_scan_start_failed(ar);
2152 break;
2153 case WMI_SCAN_EVENT_DEQUEUED:
2154 case WMI_SCAN_EVENT_PREEMPTED:
2155 case WMI_SCAN_EVENT_RESTARTED:
2156 case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
2157 default:
2158 break;
2159 }
2160
2161 spin_unlock_bh(&ar->data_lock);
2162 return 0;
2163 }
2164
2165 /* If keys are configured, HW decrypts all frames
2166 * with protected bit set. Mark such frames as decrypted.
2167 */
2168 static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
2169 struct sk_buff *skb,
2170 struct ieee80211_rx_status *status)
2171 {
2172 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2173 unsigned int hdrlen;
2174 bool peer_key;
2175 u8 *addr, keyidx;
2176
2177 if (!ieee80211_is_auth(hdr->frame_control) ||
2178 !ieee80211_has_protected(hdr->frame_control))
2179 return;
2180
2181 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2182 if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
2183 return;
2184
2185 keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
2186 addr = ieee80211_get_SA(hdr);
2187
2188 spin_lock_bh(&ar->data_lock);
2189 peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
2190 spin_unlock_bh(&ar->data_lock);
2191
2192 if (peer_key) {
2193 ath10k_dbg(ar, ATH10K_DBG_MAC,
2194 "mac wep key present for peer %pM\n", addr);
2195 status->flag |= RX_FLAG_DECRYPTED;
2196 }
2197 }
2198
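/* Parse a management rx event. Firmware advertising the extended mgmt rx
 * feature uses the v2 event layout, older firmware the v1 layout; any
 * padding past the reported MSDU length is trimmed off afterwards.
 */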
2199 static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
2200 struct wmi_mgmt_rx_ev_arg *arg)
2201 {
2202 struct wmi_mgmt_rx_event_v1 *ev_v1;
2203 struct wmi_mgmt_rx_event_v2 *ev_v2;
2204 struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
2205 struct wmi_mgmt_rx_ext_info *ext_info;
2206 size_t pull_len;
2207 u32 msdu_len;
2208 u32 len;
2209
2210 if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
2211 ar->running_fw->fw_file.fw_features)) {
2212 ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
2213 ev_hdr = &ev_v2->hdr.v1;
2214 pull_len = sizeof(*ev_v2);
2215 } else {
2216 ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
2217 ev_hdr = &ev_v1->hdr;
2218 pull_len = sizeof(*ev_v1);
2219 }
2220
2221 if (skb->len < pull_len)
2222 return -EPROTO;
2223
2224 skb_pull(skb, pull_len);
2225 arg->channel = ev_hdr->channel;
2226 arg->buf_len = ev_hdr->buf_len;
2227 arg->status = ev_hdr->status;
2228 arg->snr = ev_hdr->snr;
2229 arg->phy_mode = ev_hdr->phy_mode;
2230 arg->rate = ev_hdr->rate;
2231
2232 msdu_len = __le32_to_cpu(arg->buf_len);
2233 if (skb->len < msdu_len)
2234 return -EPROTO;
2235
2236 if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2237 len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2238 ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2239 memcpy(&arg->ext_info, ext_info,
2240 sizeof(struct wmi_mgmt_rx_ext_info));
2241 }
2242 /* the WMI buffer might've ended up being padded to 4 bytes due to HTC
2243 * trailer with credit update. Trim the excess garbage.
2244 */
2245 skb_trim(skb, msdu_len);
2246
2247 return 0;
2248 }
2249
2250 static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
2251 struct sk_buff *skb,
2252 struct wmi_mgmt_rx_ev_arg *arg)
2253 {
2254 struct wmi_10_4_mgmt_rx_event *ev;
2255 struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
2256 size_t pull_len;
2257 u32 msdu_len;
2258 struct wmi_mgmt_rx_ext_info *ext_info;
2259 u32 len;
2260
2261 ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
2262 ev_hdr = &ev->hdr;
2263 pull_len = sizeof(*ev);
2264
2265 if (skb->len < pull_len)
2266 return -EPROTO;
2267
2268 skb_pull(skb, pull_len);
2269 arg->channel = ev_hdr->channel;
2270 arg->buf_len = ev_hdr->buf_len;
2271 arg->status = ev_hdr->status;
2272 arg->snr = ev_hdr->snr;
2273 arg->phy_mode = ev_hdr->phy_mode;
2274 arg->rate = ev_hdr->rate;
2275
2276 msdu_len = __le32_to_cpu(arg->buf_len);
2277 if (skb->len < msdu_len)
2278 return -EPROTO;
2279
2280 if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2281 len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2282 ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2283 memcpy(&arg->ext_info, ext_info,
2284 sizeof(struct wmi_mgmt_rx_ext_info));
2285 }
2286
2287 /* Make sure bytes added for padding are removed. */
2288 skb_trim(skb, msdu_len);
2289
2290 return 0;
2291 }
2292
2293 static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
2294 struct ieee80211_hdr *hdr)
2295 {
2296 if (!ieee80211_has_protected(hdr->frame_control))
2297 return false;
2298
2299 /* FW delivers WEP Shared Auth frame with Protected Bit set and
2300 * encrypted payload. However in case of PMF it delivers decrypted
2301 * frames with Protected Bit set.
2302 */
2303 if (ieee80211_is_auth(hdr->frame_control))
2304 return false;
2305
2306 /* qca99x0 based FW delivers broadcast or multicast management frames
2307 * (ex: group privacy action frames in mesh) as encrypted payload.
2308 */
2309 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
2310 ar->hw_params.sw_decrypt_mcast_mgmt)
2311 return false;
2312
2313 return true;
2314 }
2315
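/* Complete an over-WMI management tx: look up the pending frame by its
 * descriptor id in the mgmt_pending_tx idr, unmap its DMA buffer and
 * report the tx status to mac80211.
 */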
2316 static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
2317 u32 status)
2318 {
2319 struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
2320 struct ath10k_wmi *wmi = &ar->wmi;
2321 struct ieee80211_tx_info *info;
2322 struct sk_buff *msdu;
2323 int ret;
2324
2325 spin_lock_bh(&ar->data_lock);
2326
2327 pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id);
2328 if (!pkt_addr) {
2329 ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
2330 desc_id);
2331 ret = -ENOENT;
2332 goto out;
2333 }
2334
2335 msdu = pkt_addr->vaddr;
2336 dma_unmap_single(ar->dev, pkt_addr->paddr,
2337 msdu->len, DMA_FROM_DEVICE);
2338 info = IEEE80211_SKB_CB(msdu);
2339 info->flags |= status;
2340 ieee80211_tx_status_irqsafe(ar->hw, msdu);
2341
2342 ret = 0;
2343
2344 out:
2345 idr_remove(&wmi->mgmt_pending_tx, desc_id);
2346 spin_unlock_bh(&ar->data_lock);
2347 return ret;
2348 }
2349
2350 int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
2351 {
2352 struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
2353 int ret;
2354
2355 ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
2356 if (ret) {
2357 ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
2358 return ret;
2359 }
2360
2361 wmi_process_mgmt_tx_comp(ar, __le32_to_cpu(arg.desc_id),
2362 __le32_to_cpu(arg.status));
2363
2364 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event mgmt tx completion\n");
2365
2366 return 0;
2367 }
2368
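/* Management rx event handler: frames with decrypt/key-cache/CRC errors or
 * received while CAC is running are dropped, the band and frequency are
 * derived from the reported channel number, protected-frame flags are fixed
 * up and the frame is finally handed to mac80211 via ieee80211_rx().
 */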
2369 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
2370 {
2371 struct wmi_mgmt_rx_ev_arg arg = {};
2372 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2373 struct ieee80211_hdr *hdr;
2374 struct ieee80211_supported_band *sband;
2375 u32 rx_status;
2376 u32 channel;
2377 u32 phy_mode;
2378 u32 snr;
2379 u32 rate;
2380 u16 fc;
2381 int ret;
2382
2383 ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
2384 if (ret) {
2385 ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
2386 dev_kfree_skb(skb);
2387 return ret;
2388 }
2389
2390 channel = __le32_to_cpu(arg.channel);
2391 rx_status = __le32_to_cpu(arg.status);
2392 snr = __le32_to_cpu(arg.snr);
2393 phy_mode = __le32_to_cpu(arg.phy_mode);
2394 rate = __le32_to_cpu(arg.rate);
2395
2396 memset(status, 0, sizeof(*status));
2397
2398 ath10k_dbg(ar, ATH10K_DBG_MGMT,
2399 "event mgmt rx status %08x\n", rx_status);
2400
2401 if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) ||
2402 (rx_status & (WMI_RX_STATUS_ERR_DECRYPT |
2403 WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
2404 dev_kfree_skb(skb);
2405 return 0;
2406 }
2407
2408 if (rx_status & WMI_RX_STATUS_ERR_MIC)
2409 status->flag |= RX_FLAG_MMIC_ERROR;
2410
2411 if (rx_status & WMI_RX_STATUS_EXT_INFO) {
2412 status->mactime =
2413 __le64_to_cpu(arg.ext_info.rx_mac_timestamp);
2414 status->flag |= RX_FLAG_MACTIME_END;
2415 }
2416 /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
2417 * MODE_11B. This means phy_mode is not a reliable source for the band
2418 * of mgmt rx.
2419 */
2420 if (channel >= 1 && channel <= 14) {
2421 status->band = NL80211_BAND_2GHZ;
2422 } else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) {
2423 status->band = NL80211_BAND_5GHZ;
2424 } else {
2425 /* Shouldn't happen unless list of advertised channels to
2426 * mac80211 has been changed.
2427 */
2428 WARN_ON_ONCE(1);
2429 dev_kfree_skb(skb);
2430 return 0;
2431 }
2432
2433 if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
2434 ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
2435
2436 sband = &ar->mac.sbands[status->band];
2437
2438 status->freq = ieee80211_channel_to_frequency(channel, status->band);
2439 status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
2440 status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
2441
2442 hdr = (struct ieee80211_hdr *)skb->data;
2443 fc = le16_to_cpu(hdr->frame_control);
2444
2445 /* Firmware is guaranteed to report all essential management frames via
2446 * WMI while it can deliver some extra via HTT. Since there can be
2447 * duplicates split the reporting wrt monitor/sniffing.
2448 */
2449 status->flag |= RX_FLAG_SKIP_MONITOR;
2450
2451 ath10k_wmi_handle_wep_reauth(ar, skb, status);
2452
2453 if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
2454 status->flag |= RX_FLAG_DECRYPTED;
2455
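/* Robust management frames (action/deauth/disassoc) are left untouched
 * below, likely so mac80211 can do its own protected management frame
 * (PMF) handling; this rationale is inferred, not stated in the code.
 */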
2456 if (!ieee80211_is_action(hdr->frame_control) &&
2457 !ieee80211_is_deauth(hdr->frame_control) &&
2458 !ieee80211_is_disassoc(hdr->frame_control)) {
2459 status->flag |= RX_FLAG_IV_STRIPPED |
2460 RX_FLAG_MMIC_STRIPPED;
2461 hdr->frame_control = __cpu_to_le16(fc &
2462 ~IEEE80211_FCTL_PROTECTED);
2463 }
2464 }
2465
2466 if (ieee80211_is_beacon(hdr->frame_control))
2467 ath10k_mac_handle_beacon(ar, skb);
2468
2469 ath10k_dbg(ar, ATH10K_DBG_MGMT,
2470 "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
2471 skb, skb->len,
2472 fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
2473
2474 ath10k_dbg(ar, ATH10K_DBG_MGMT,
2475 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
2476 status->freq, status->band, status->signal,
2477 status->rate_idx);
2478
2479 ieee80211_rx(ar->hw, skb);
2480 return 0;
2481 }
2482
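/* Map a channel center frequency to the flat index used by the ar->survey[]
 * array. If the frequency is not found, the returned index equals the total
 * number of advertised channels, so callers must bounds-check the result.
 */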
2483 static int freq_to_idx(struct ath10k *ar, int freq)
2484 {
2485 struct ieee80211_supported_band *sband;
2486 int band, ch, idx = 0;
2487
2488 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
2489 sband = ar->hw->wiphy->bands[band];
2490 if (!sband)
2491 continue;
2492
2493 for (ch = 0; ch < sband->n_channels; ch++, idx++)
2494 if (sband->channels[ch].center_freq == freq)
2495 goto exit;
2496 }
2497
2498 exit:
2499 return idx;
2500 }
2501
2502 static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
2503 struct wmi_ch_info_ev_arg *arg)
2504 {
2505 struct wmi_chan_info_event *ev = (void *)skb->data;
2506
2507 if (skb->len < sizeof(*ev))
2508 return -EPROTO;
2509
2510 skb_pull(skb, sizeof(*ev));
2511 arg->err_code = ev->err_code;
2512 arg->freq = ev->freq;
2513 arg->cmd_flags = ev->cmd_flags;
2514 arg->noise_floor = ev->noise_floor;
2515 arg->rx_clear_count = ev->rx_clear_count;
2516 arg->cycle_count = ev->cycle_count;
2517
2518 return 0;
2519 }
2520
2521 static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
2522 struct sk_buff *skb,
2523 struct wmi_ch_info_ev_arg *arg)
2524 {
2525 struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
2526
2527 if (skb->len < sizeof(*ev))
2528 return -EPROTO;
2529
2530 skb_pull(skb, sizeof(*ev));
2531 arg->err_code = ev->err_code;
2532 arg->freq = ev->freq;
2533 arg->cmd_flags = ev->cmd_flags;
2534 arg->noise_floor = ev->noise_floor;
2535 arg->rx_clear_count = ev->rx_clear_count;
2536 arg->cycle_count = ev->cycle_count;
2537 arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
2538 arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
2539 arg->rx_frame_count = ev->rx_frame_count;
2540
2541 return 0;
2542 }
2543
2544 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
2545 {
2546 struct wmi_ch_info_ev_arg arg = {};
2547 struct survey_info *survey;
2548 u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
2549 int idx, ret;
2550
2551 ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
2552 if (ret) {
2553 ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
2554 return;
2555 }
2556
2557 err_code = __le32_to_cpu(arg.err_code);
2558 freq = __le32_to_cpu(arg.freq);
2559 cmd_flags = __le32_to_cpu(arg.cmd_flags);
2560 noise_floor = __le32_to_cpu(arg.noise_floor);
2561 rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
2562 cycle_count = __le32_to_cpu(arg.cycle_count);
2563
2564 ath10k_dbg(ar, ATH10K_DBG_WMI,
2565 "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
2566 err_code, freq, cmd_flags, noise_floor, rx_clear_count,
2567 cycle_count);
2568
2569 spin_lock_bh(&ar->data_lock);
2570
2571 switch (ar->scan.state) {
2572 case ATH10K_SCAN_IDLE:
2573 case ATH10K_SCAN_STARTING:
2574 ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
2575 goto exit;
2576 case ATH10K_SCAN_RUNNING:
2577 case ATH10K_SCAN_ABORTING:
2578 break;
2579 }
2580
2581 idx = freq_to_idx(ar, freq);
2582 if (idx >= ARRAY_SIZE(ar->survey)) {
2583 ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
2584 freq, idx);
2585 goto exit;
2586 }
2587
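	/* Survey reporting is armed by a chan info event without the COMPLETE
	 * flag and filled in by the final event carrying it, using the delta
	 * against the previously saved cycle/rx_clear counters.
	 */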
2588 if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
2589 if (ar->ch_info_can_report_survey) {
2590 survey = &ar->survey[idx];
2591 survey->noise = noise_floor;
2592 survey->filled = SURVEY_INFO_NOISE_DBM;
2593
2594 ath10k_hw_fill_survey_time(ar,
2595 survey,
2596 cycle_count,
2597 rx_clear_count,
2598 ar->survey_last_cycle_count,
2599 ar->survey_last_rx_clear_count);
2600 }
2601
2602 ar->ch_info_can_report_survey = false;
2603 } else {
2604 ar->ch_info_can_report_survey = true;
2605 }
2606
2607 if (!(cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
2608 ar->survey_last_rx_clear_count = rx_clear_count;
2609 ar->survey_last_cycle_count = cycle_count;
2610 }
2611
2612 exit:
2613 spin_unlock_bh(&ar->data_lock);
2614 }
2615
2616 void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
2617 {
2618 struct wmi_echo_ev_arg arg = {};
2619 int ret;
2620
2621 ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
2622 if (ret) {
2623 ath10k_warn(ar, "failed to parse echo: %d\n", ret);
2624 return;
2625 }
2626
2627 ath10k_dbg(ar, ATH10K_DBG_WMI,
2628 "wmi event echo value 0x%08x\n",
2629 le32_to_cpu(arg.value));
2630
2631 if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
2632 complete(&ar->wmi.barrier);
2633 }
2634
2635 int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
2636 {
2637 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
2638 skb->len);
2639
2640 trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
2641
2642 return 0;
2643 }
2644
2645 void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
2646 struct ath10k_fw_stats_pdev *dst)
2647 {
2648 dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
2649 dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
2650 dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
2651 dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
2652 dst->cycle_count = __le32_to_cpu(src->cycle_count);
2653 dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
2654 dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
2655 }
2656
2657 void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
2658 struct ath10k_fw_stats_pdev *dst)
2659 {
2660 dst->comp_queued = __le32_to_cpu(src->comp_queued);
2661 dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
2662 dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
2663 dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
2664 dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
2665 dst->local_enqued = __le32_to_cpu(src->local_enqued);
2666 dst->local_freed = __le32_to_cpu(src->local_freed);
2667 dst->hw_queued = __le32_to_cpu(src->hw_queued);
2668 dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
2669 dst->underrun = __le32_to_cpu(src->underrun);
2670 dst->tx_abort = __le32_to_cpu(src->tx_abort);
2671 dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
2672 dst->tx_ko = __le32_to_cpu(src->tx_ko);
2673 dst->data_rc = __le32_to_cpu(src->data_rc);
2674 dst->self_triggers = __le32_to_cpu(src->self_triggers);
2675 dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
2676 dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
2677 dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
2678 dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
2679 dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
2680 dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
2681 dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
2682 }
2683
2684 static void
2685 ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
2686 struct ath10k_fw_stats_pdev *dst)
2687 {
2688 dst->comp_queued = __le32_to_cpu(src->comp_queued);
2689 dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
2690 dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
2691 dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
2692 dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
2693 dst->local_enqued = __le32_to_cpu(src->local_enqued);
2694 dst->local_freed = __le32_to_cpu(src->local_freed);
2695 dst->hw_queued = __le32_to_cpu(src->hw_queued);
2696 dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
2697 dst->underrun = __le32_to_cpu(src->underrun);
2698 dst->tx_abort = __le32_to_cpu(src->tx_abort);
2699 dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
2700 dst->tx_ko = __le32_to_cpu(src->tx_ko);
2701 dst->data_rc = __le32_to_cpu(src->data_rc);
2702 dst->self_triggers = __le32_to_cpu(src->self_triggers);
2703 dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
2704 dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
2705 dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
2706 dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
2707 dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
2708 dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
2709 dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
2710 dst->hw_paused = __le32_to_cpu(src->hw_paused);
2711 dst->seq_posted = __le32_to_cpu(src->seq_posted);
2712 dst->seq_failed_queueing =
2713 __le32_to_cpu(src->seq_failed_queueing);
2714 dst->seq_completed = __le32_to_cpu(src->seq_completed);
2715 dst->seq_restarted = __le32_to_cpu(src->seq_restarted);
2716 dst->mu_seq_posted = __le32_to_cpu(src->mu_seq_posted);
2717 dst->mpdus_sw_flush = __le32_to_cpu(src->mpdus_sw_flush);
2718 dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
2719 dst->mpdus_truncated = __le32_to_cpu(src->mpdus_truncated);
2720 dst->mpdus_ack_failed = __le32_to_cpu(src->mpdus_ack_failed);
2721 dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
2722 dst->mpdus_expired = __le32_to_cpu(src->mpdus_expired);
2723 }
2724
2725 void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
2726 struct ath10k_fw_stats_pdev *dst)
2727 {
2728 dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
2729 dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
2730 dst->r0_frags = __le32_to_cpu(src->r0_frags);
2731 dst->r1_frags = __le32_to_cpu(src->r1_frags);
2732 dst->r2_frags = __le32_to_cpu(src->r2_frags);
2733 dst->r3_frags = __le32_to_cpu(src->r3_frags);
2734 dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
2735 dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
2736 dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
2737 dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
2738 dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
2739 dst->phy_errs = __le32_to_cpu(src->phy_errs);
2740 dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
2741 dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
2742 }
2743
2744 void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
2745 struct ath10k_fw_stats_pdev *dst)
2746 {
2747 dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
2748 dst->rts_bad = __le32_to_cpu(src->rts_bad);
2749 dst->rts_good = __le32_to_cpu(src->rts_good);
2750 dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
2751 dst->no_beacons = __le32_to_cpu(src->no_beacons);
2752 dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
2753 }
2754
2755 void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
2756 struct ath10k_fw_stats_peer *dst)
2757 {
2758 ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2759 dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2760 dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2761 }
2762
2763 static void
2764 ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
2765 struct ath10k_fw_stats_peer *dst)
2766 {
2767 ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2768 dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2769 dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2770 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
2771 }
2772
2773 static void
2774 ath10k_wmi_10_4_pull_vdev_stats(const struct wmi_vdev_stats_extd *src,
2775 struct ath10k_fw_stats_vdev_extd *dst)
2776 {
2777 dst->vdev_id = __le32_to_cpu(src->vdev_id);
2778 dst->ppdu_aggr_cnt = __le32_to_cpu(src->ppdu_aggr_cnt);
2779 dst->ppdu_noack = __le32_to_cpu(src->ppdu_noack);
2780 dst->mpdu_queued = __le32_to_cpu(src->mpdu_queued);
2781 dst->ppdu_nonaggr_cnt = __le32_to_cpu(src->ppdu_nonaggr_cnt);
2782 dst->mpdu_sw_requeued = __le32_to_cpu(src->mpdu_sw_requeued);
2783 dst->mpdu_suc_retry = __le32_to_cpu(src->mpdu_suc_retry);
2784 dst->mpdu_suc_multitry = __le32_to_cpu(src->mpdu_suc_multitry);
2785 dst->mpdu_fail_retry = __le32_to_cpu(src->mpdu_fail_retry);
2786 dst->tx_ftm_suc = __le32_to_cpu(src->tx_ftm_suc);
2787 dst->tx_ftm_suc_retry = __le32_to_cpu(src->tx_ftm_suc_retry);
2788 dst->tx_ftm_fail = __le32_to_cpu(src->tx_ftm_fail);
2789 dst->rx_ftmr_cnt = __le32_to_cpu(src->rx_ftmr_cnt);
2790 dst->rx_ftmr_dup_cnt = __le32_to_cpu(src->rx_ftmr_dup_cnt);
2791 dst->rx_iftmr_cnt = __le32_to_cpu(src->rx_iftmr_cnt);
2792 dst->rx_iftmr_dup_cnt = __le32_to_cpu(src->rx_iftmr_dup_cnt);
2793 }
2794
2795 static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
2796 struct sk_buff *skb,
2797 struct ath10k_fw_stats *stats)
2798 {
2799 const struct wmi_stats_event *ev = (void *)skb->data;
2800 u32 num_pdev_stats, num_peer_stats;
2801 int i;
2802
2803 if (!skb_pull(skb, sizeof(*ev)))
2804 return -EPROTO;
2805
2806 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
2807 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
2808
2809 for (i = 0; i < num_pdev_stats; i++) {
2810 const struct wmi_pdev_stats *src;
2811 struct ath10k_fw_stats_pdev *dst;
2812
2813 src = (void *)skb->data;
2814 if (!skb_pull(skb, sizeof(*src)))
2815 return -EPROTO;
2816
2817 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
2818 if (!dst)
2819 continue;
2820
2821 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
2822 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
2823 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
2824
2825 list_add_tail(&dst->list, &stats->pdevs);
2826 }
2827
2828 /* fw doesn't implement vdev stats */
2829
2830 for (i = 0; i < num_peer_stats; i++) {
2831 const struct wmi_peer_stats *src;
2832 struct ath10k_fw_stats_peer *dst;
2833
2834 src = (void *)skb->data;
2835 if (!skb_pull(skb, sizeof(*src)))
2836 return -EPROTO;
2837
2838 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
2839 if (!dst)
2840 continue;
2841
2842 ath10k_wmi_pull_peer_stats(src, dst);
2843 list_add_tail(&dst->list, &stats->peers);
2844 }
2845
2846 return 0;
2847 }
2848
2849 static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
2850 struct sk_buff *skb,
2851 struct ath10k_fw_stats *stats)
2852 {
2853 const struct wmi_stats_event *ev = (void *)skb->data;
2854 u32 num_pdev_stats, num_peer_stats;
2855 int i;
2856
2857 if (!skb_pull(skb, sizeof(*ev)))
2858 return -EPROTO;
2859
2860 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
2861 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
2862
2863 for (i = 0; i < num_pdev_stats; i++) {
2864 const struct wmi_10x_pdev_stats *src;
2865 struct ath10k_fw_stats_pdev *dst;
2866
2867 src = (void *)skb->data;
2868 if (!skb_pull(skb, sizeof(*src)))
2869 return -EPROTO;
2870
2871 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
2872 if (!dst)
2873 continue;
2874
2875 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
2876 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
2877 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
2878 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
2879
2880 list_add_tail(&dst->list, &stats->pdevs);
2881 }
2882
2883 /* fw doesn't implement vdev stats */
2884
2885 for (i = 0; i < num_peer_stats; i++) {
2886 const struct wmi_10x_peer_stats *src;
2887 struct ath10k_fw_stats_peer *dst;
2888
2889 src = (void *)skb->data;
2890 if (!skb_pull(skb, sizeof(*src)))
2891 return -EPROTO;
2892
2893 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
2894 if (!dst)
2895 continue;
2896
2897 ath10k_wmi_pull_peer_stats(&src->old, dst);
2898
2899 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
2900
2901 list_add_tail(&dst->list, &stats->peers);
2902 }
2903
2904 return 0;
2905 }
2906
2907 static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
2908 struct sk_buff *skb,
2909 struct ath10k_fw_stats *stats)
2910 {
2911 const struct wmi_10_2_stats_event *ev = (void *)skb->data;
2912 u32 num_pdev_stats;
2913 u32 num_pdev_ext_stats;
2914 u32 num_peer_stats;
2915 int i;
2916
2917 if (!skb_pull(skb, sizeof(*ev)))
2918 return -EPROTO;
2919
2920 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
2921 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
2922 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
2923
2924 for (i = 0; i < num_pdev_stats; i++) {
2925 const struct wmi_10_2_pdev_stats *src;
2926 struct ath10k_fw_stats_pdev *dst;
2927
2928 src = (void *)skb->data;
2929 if (!skb_pull(skb, sizeof(*src)))
2930 return -EPROTO;
2931
2932 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
2933 if (!dst)
2934 continue;
2935
2936 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
2937 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
2938 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
2939 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
2940 /* FIXME: expose 10.2 specific values */
2941
2942 list_add_tail(&dst->list, &stats->pdevs);
2943 }
2944
2945 for (i = 0; i < num_pdev_ext_stats; i++) {
2946 const struct wmi_10_2_pdev_ext_stats *src;
2947
2948 src = (void *)skb->data;
2949 if (!skb_pull(skb, sizeof(*src)))
2950 return -EPROTO;
2951
2952 /* FIXME: expose values to userspace
2953 *
2954 * Note: Even though this loop seems to do nothing, it is
2955 * required to parse the following sub-structures properly.
2956 */
2957 }
2958
2959 /* fw doesn't implement vdev stats */
2960
2961 for (i = 0; i < num_peer_stats; i++) {
2962 const struct wmi_10_2_peer_stats *src;
2963 struct ath10k_fw_stats_peer *dst;
2964
2965 src = (void *)skb->data;
2966 if (!skb_pull(skb, sizeof(*src)))
2967 return -EPROTO;
2968
2969 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
2970 if (!dst)
2971 continue;
2972
2973 ath10k_wmi_pull_peer_stats(&src->old, dst);
2974
2975 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
2976 /* FIXME: expose 10.2 specific values */
2977
2978 list_add_tail(&dst->list, &stats->peers);
2979 }
2980
2981 return 0;
2982 }
2983
2984 static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
2985 struct sk_buff *skb,
2986 struct ath10k_fw_stats *stats)
2987 {
2988 const struct wmi_10_2_stats_event *ev = (void *)skb->data;
2989 u32 num_pdev_stats;
2990 u32 num_pdev_ext_stats;
2991 u32 num_peer_stats;
2992 int i;
2993
2994 if (!skb_pull(skb, sizeof(*ev)))
2995 return -EPROTO;
2996
2997 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
2998 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
2999 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3000
3001 for (i = 0; i < num_pdev_stats; i++) {
3002 const struct wmi_10_2_pdev_stats *src;
3003 struct ath10k_fw_stats_pdev *dst;
3004
3005 src = (void *)skb->data;
3006 if (!skb_pull(skb, sizeof(*src)))
3007 return -EPROTO;
3008
3009 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3010 if (!dst)
3011 continue;
3012
3013 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3014 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3015 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3016 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3017 /* FIXME: expose 10.2 specific values */
3018
3019 list_add_tail(&dst->list, &stats->pdevs);
3020 }
3021
3022 for (i = 0; i < num_pdev_ext_stats; i++) {
3023 const struct wmi_10_2_pdev_ext_stats *src;
3024
3025 src = (void *)skb->data;
3026 if (!skb_pull(skb, sizeof(*src)))
3027 return -EPROTO;
3028
3029 /* FIXME: expose values to userspace
3030 *
3031 * Note: Even though this loop seems to do nothing, it is
3032 * required to parse the following sub-structures properly.
3033 */
3034 }
3035
3036 /* fw doesn't implement vdev stats */
3037
3038 for (i = 0; i < num_peer_stats; i++) {
3039 const struct wmi_10_2_4_ext_peer_stats *src;
3040 struct ath10k_fw_stats_peer *dst;
3041 int stats_len;
3042
3043 if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
3044 stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
3045 else
3046 stats_len = sizeof(struct wmi_10_2_4_peer_stats);
3047
3048 src = (void *)skb->data;
3049 if (!skb_pull(skb, stats_len))
3050 return -EPROTO;
3051
3052 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3053 if (!dst)
3054 continue;
3055
3056 ath10k_wmi_pull_peer_stats(&src->common.old, dst);
3057
3058 dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
3059
3060 if (ath10k_peer_stats_enabled(ar))
3061 dst->rx_duration = __le32_to_cpu(src->rx_duration);
3062 /* FIXME: expose 10.2 specific values */
3063
3064 list_add_tail(&dst->list, &stats->peers);
3065 }
3066
3067 return 0;
3068 }
3069
3070 static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
3071 struct sk_buff *skb,
3072 struct ath10k_fw_stats *stats)
3073 {
3074 const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3075 u32 num_pdev_stats;
3076 u32 num_pdev_ext_stats;
3077 u32 num_vdev_stats;
3078 u32 num_peer_stats;
3079 u32 num_bcnflt_stats;
3080 u32 stats_id;
3081 int i;
3082
3083 if (!skb_pull(skb, sizeof(*ev)))
3084 return -EPROTO;
3085
3086 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3087 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3088 num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
3089 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3090 num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
3091 stats_id = __le32_to_cpu(ev->stats_id);
3092
3093 for (i = 0; i < num_pdev_stats; i++) {
3094 const struct wmi_10_4_pdev_stats *src;
3095 struct ath10k_fw_stats_pdev *dst;
3096
3097 src = (void *)skb->data;
3098 if (!skb_pull(skb, sizeof(*src)))
3099 return -EPROTO;
3100
3101 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3102 if (!dst)
3103 continue;
3104
3105 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3106 ath10k_wmi_10_4_pull_pdev_stats_tx(&src->tx, dst);
3107 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3108 dst->rx_ovfl_errs = __le32_to_cpu(src->rx_ovfl_errs);
3109 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3110
3111 list_add_tail(&dst->list, &stats->pdevs);
3112 }
3113
3114 for (i = 0; i < num_pdev_ext_stats; i++) {
3115 const struct wmi_10_2_pdev_ext_stats *src;
3116
3117 src = (void *)skb->data;
3118 if (!skb_pull(skb, sizeof(*src)))
3119 return -EPROTO;
3120
3121 /* FIXME: expose values to userspace
3122 *
3123 * Note: Even though this loop seems to do nothing, it is
3124 * required to parse the following sub-structures properly.
3125 */
3126 }
3127
3128 for (i = 0; i < num_vdev_stats; i++) {
3129 const struct wmi_vdev_stats *src;
3130
3131 /* Ignore vdev stats here as they contain only the vdev id. Actual
3132 * vdev stats are retrieved from the vdev extended stats.
3133 */
3134 src = (void *)skb->data;
3135 if (!skb_pull(skb, sizeof(*src)))
3136 return -EPROTO;
3137 }
3138
3139 for (i = 0; i < num_peer_stats; i++) {
3140 const struct wmi_10_4_peer_stats *src;
3141 struct ath10k_fw_stats_peer *dst;
3142
3143 src = (void *)skb->data;
3144 if (!skb_pull(skb, sizeof(*src)))
3145 return -EPROTO;
3146
3147 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3148 if (!dst)
3149 continue;
3150
3151 ath10k_wmi_10_4_pull_peer_stats(src, dst);
3152 list_add_tail(&dst->list, &stats->peers);
3153 }
3154
3155 for (i = 0; i < num_bcnflt_stats; i++) {
3156 const struct wmi_10_4_bss_bcn_filter_stats *src;
3157
3158 src = (void *)skb->data;
3159 if (!skb_pull(skb, sizeof(*src)))
3160 return -EPROTO;
3161
3162 /* FIXME: expose values to userspace
3163 *
3164 * Note: Even though this loop seems to do nothing, it is
3165 * required to parse the following sub-structures properly.
3166 */
3167 }
3168
3169 if (stats_id & WMI_10_4_STAT_PEER_EXTD) {
3170 stats->extended = true;
3171
3172 for (i = 0; i < num_peer_stats; i++) {
3173 const struct wmi_10_4_peer_extd_stats *src;
3174 struct ath10k_fw_extd_stats_peer *dst;
3175
3176 src = (void *)skb->data;
3177 if (!skb_pull(skb, sizeof(*src)))
3178 return -EPROTO;
3179
3180 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3181 if (!dst)
3182 continue;
3183
3184 ether_addr_copy(dst->peer_macaddr,
3185 src->peer_macaddr.addr);
3186 dst->rx_duration = __le32_to_cpu(src->rx_duration);
3187 list_add_tail(&dst->list, &stats->peers_extd);
3188 }
3189 }
3190
3191 if (stats_id & WMI_10_4_STAT_VDEV_EXTD) {
3192 for (i = 0; i < num_vdev_stats; i++) {
3193 const struct wmi_vdev_stats_extd *src;
3194 struct ath10k_fw_stats_vdev_extd *dst;
3195
3196 src = (void *)skb->data;
3197 if (!skb_pull(skb, sizeof(*src)))
3198 return -EPROTO;
3199
3200 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3201 if (!dst)
3202 continue;
3203 ath10k_wmi_10_4_pull_vdev_stats(src, dst);
3204 list_add_tail(&dst->list, &stats->vdevs);
3205 }
3206 }
3207
3208 return 0;
3209 }
3210
3211 void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
3212 {
3213 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
3214 ath10k_debug_fw_stats_process(ar, skb);
3215 }
3216
3217 static int
3218 ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
3219 struct wmi_vdev_start_ev_arg *arg)
3220 {
3221 struct wmi_vdev_start_response_event *ev = (void *)skb->data;
3222
3223 if (skb->len < sizeof(*ev))
3224 return -EPROTO;
3225
3226 skb_pull(skb, sizeof(*ev));
3227 arg->vdev_id = ev->vdev_id;
3228 arg->req_id = ev->req_id;
3229 arg->resp_type = ev->resp_type;
3230 arg->status = ev->status;
3231
3232 return 0;
3233 }
3234
3235 void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
3236 {
3237 struct wmi_vdev_start_ev_arg arg = {};
3238 int ret;
3239
3240 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
3241
3242 ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
3243 if (ret) {
3244 ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
3245 return;
3246 }
3247
3248 if (WARN_ON(__le32_to_cpu(arg.status)))
3249 return;
3250
3251 complete(&ar->vdev_setup_done);
3252 }
3253
3254 void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
3255 {
3256 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
3257 complete(&ar->vdev_setup_done);
3258 }
3259
3260 static int
3261 ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
3262 struct wmi_peer_kick_ev_arg *arg)
3263 {
3264 struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
3265
3266 if (skb->len < sizeof(*ev))
3267 return -EPROTO;
3268
3269 skb_pull(skb, sizeof(*ev));
3270 arg->mac_addr = ev->peer_macaddr.addr;
3271
3272 return 0;
3273 }
3274
3275 void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
3276 {
3277 struct wmi_peer_kick_ev_arg arg = {};
3278 struct ieee80211_sta *sta;
3279 int ret;
3280
3281 ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
3282 if (ret) {
3283 ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
3284 ret);
3285 return;
3286 }
3287
3288 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
3289 arg.mac_addr);
3290
3291 rcu_read_lock();
3292
3293 sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
3294 if (!sta) {
3295 ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
3296 arg.mac_addr);
3297 goto exit;
3298 }
3299
3300 ieee80211_report_low_ack(sta, 10);
3301
3302 exit:
3303 rcu_read_unlock();
3304 }
3305
3306 /*
3307 * FIXME
3308 *
3309 * We don't report to mac80211 sleep state of connected
3310 * stations. Due to this mac80211 can't fill in TIM IE
3311 * correctly.
3312 *
3313 * I know of no way of getting nullfunc frames that contain
3314 * sleep transition from connected stations - these do not
3315 * seem to be sent from the target to the host. There also
3316 * doesn't seem to be a dedicated event for that. So the
3317 * only way left to do this would be to read tim_bitmap
3318 * during SWBA.
3319 *
3320 * We could probably try using tim_bitmap from SWBA to tell
3321 * mac80211 which stations are asleep and which are not. The
3322 * problem here is calling mac80211 functions so many times
3323 * could take too long and make us miss the time to submit
3324 * the beacon to the target.
3325 *
3326 * So as a workaround we try to extend the TIM IE if there
3327 * is unicast buffered for stations with aid > 7 and fill it
3328 * in ourselves.
3329 */
3330 static void ath10k_wmi_update_tim(struct ath10k *ar,
3331 struct ath10k_vif *arvif,
3332 struct sk_buff *bcn,
3333 const struct wmi_tim_info_arg *tim_info)
3334 {
3335 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
3336 struct ieee80211_tim_ie *tim;
3337 u8 *ies, *ie;
3338 u8 ie_len, pvm_len;
3339 __le32 t;
3340 u32 v, tim_len;
3341
3342 /* When FW reports 0 in tim_len, ensure at least the first byte
3343 * in tim_bitmap is considered for the pvm calculation.
3344 */
3345 tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
3346
3347 /* If the next SWBA has tim_changed cleared, the tim_bitmap is garbage;
3348 * we must copy the bitmap upon change and reuse it later.
3349 */
3350 if (__le32_to_cpu(tim_info->tim_changed)) {
3351 int i;
3352
3353 if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
3354 ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu",
3355 tim_len, sizeof(arvif->u.ap.tim_bitmap));
3356 tim_len = sizeof(arvif->u.ap.tim_bitmap);
3357 }
3358
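		/* tim_bitmap arrives as an array of __le32 words; unpack it
		 * into bytes, least-significant byte of each word first.
		 */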
3359 for (i = 0; i < tim_len; i++) {
3360 t = tim_info->tim_bitmap[i / 4];
3361 v = __le32_to_cpu(t);
3362 arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
3363 }
3364
3365 /* FW reports either length 0 or a length based on the max supported
3366 * number of stations, so we calculate this on our own.
3367 */
3368 arvif->u.ap.tim_len = 0;
3369 for (i = 0; i < tim_len; i++)
3370 if (arvif->u.ap.tim_bitmap[i])
3371 arvif->u.ap.tim_len = i;
3372
3373 arvif->u.ap.tim_len++;
3374 }
3375
3376 ies = bcn->data;
3377 ies += ieee80211_hdrlen(hdr->frame_control);
3378 ies += 12; /* fixed parameters */
3379
3380 ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
3381 (u8 *)skb_tail_pointer(bcn) - ies);
3382 if (!ie) {
3383 if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
3384 ath10k_warn(ar, "no tim ie found;\n");
3385 return;
3386 }
3387
3388 tim = (void *)ie + 2;
3389 ie_len = ie[1];
3390 pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
3391
3392 if (pvm_len < arvif->u.ap.tim_len) {
3393 int expand_size = tim_len - pvm_len;
3394 int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
3395 void *next_ie = ie + 2 + ie_len;
3396
3397 if (skb_put(bcn, expand_size)) {
3398 memmove(next_ie + expand_size, next_ie, move_size);
3399
3400 ie[1] += expand_size;
3401 ie_len += expand_size;
3402 pvm_len += expand_size;
3403 } else {
3404 ath10k_warn(ar, "tim expansion failed\n");
3405 }
3406 }
3407
3408 if (pvm_len > tim_len) {
3409 ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
3410 return;
3411 }
3412
3413 tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
3414 memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
3415
3416 if (tim->dtim_count == 0) {
3417 ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO;
3418
3419 if (__le32_to_cpu(tim_info->tim_mcast) == 1)
3420 ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB;
3421 }
3422
3423 ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
3424 tim->dtim_count, tim->dtim_period,
3425 tim->bitmap_ctrl, pvm_len);
3426 }
3427
3428 static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
3429 struct sk_buff *bcn,
3430 const struct wmi_p2p_noa_info *noa)
3431 {
3432 if (!arvif->vif->p2p)
3433 return;
3434
3435 ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
3436
3437 if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
3438 ath10k_p2p_noa_update(arvif, noa);
3439
3440 if (arvif->u.ap.noa_data)
3441 if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
3442 skb_put_data(bcn, arvif->u.ap.noa_data,
3443 arvif->u.ap.noa_len);
3444 }
3445
3446 static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
3447 struct wmi_swba_ev_arg *arg)
3448 {
3449 struct wmi_host_swba_event *ev = (void *)skb->data;
3450 u32 map;
3451 size_t i;
3452
3453 if (skb->len < sizeof(*ev))
3454 return -EPROTO;
3455
3456 skb_pull(skb, sizeof(*ev));
3457 arg->vdev_map = ev->vdev_map;
3458
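	/* Walk the vdev bitmap; each set bit consumes the next bcn_info entry
	 * in order.
	 */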
3459 for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3460 if (!(map & BIT(0)))
3461 continue;
3462
3463 /* If this happens there were some changes in firmware and
3464 * ath10k should update the max size of tim_info array.
3465 */
3466 if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3467 break;
3468
3469 if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3470 sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3471 ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3472 return -EPROTO;
3473 }
3474
3475 arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
3476 arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3477 arg->tim_info[i].tim_bitmap =
3478 ev->bcn_info[i].tim_info.tim_bitmap;
3479 arg->tim_info[i].tim_changed =
3480 ev->bcn_info[i].tim_info.tim_changed;
3481 arg->tim_info[i].tim_num_ps_pending =
3482 ev->bcn_info[i].tim_info.tim_num_ps_pending;
3483
3484 arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
3485 i++;
3486 }
3487
3488 return 0;
3489 }
3490
3491 static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar,
3492 struct sk_buff *skb,
3493 struct wmi_swba_ev_arg *arg)
3494 {
3495 struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data;
3496 u32 map;
3497 size_t i;
3498
3499 if (skb->len < sizeof(*ev))
3500 return -EPROTO;
3501
3502 skb_pull(skb, sizeof(*ev));
3503 arg->vdev_map = ev->vdev_map;
3504
3505 for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3506 if (!(map & BIT(0)))
3507 continue;
3508
3509 /* If this happens there were some changes in firmware and
3510 * ath10k should update the max size of tim_info array.
3511 */
3512 if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3513 break;
3514
3515 if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3516 sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3517 ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3518 return -EPROTO;
3519 }
3520
3521 arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
3522 arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3523 arg->tim_info[i].tim_bitmap =
3524 ev->bcn_info[i].tim_info.tim_bitmap;
3525 arg->tim_info[i].tim_changed =
3526 ev->bcn_info[i].tim_info.tim_changed;
3527 arg->tim_info[i].tim_num_ps_pending =
3528 ev->bcn_info[i].tim_info.tim_num_ps_pending;
3529 i++;
3530 }
3531
3532 return 0;
3533 }
3534
3535 static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
3536 struct sk_buff *skb,
3537 struct wmi_swba_ev_arg *arg)
3538 {
3539 struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
3540 u32 map, tim_len;
3541 size_t i;
3542
3543 if (skb->len < sizeof(*ev))
3544 return -EPROTO;
3545
3546 skb_pull(skb, sizeof(*ev));
3547 arg->vdev_map = ev->vdev_map;
3548
3549 for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3550 if (!(map & BIT(0)))
3551 continue;
3552
3553 /* If this happens there were some changes in firmware and
3554 * ath10k should update the max size of tim_info array.
3555 */
3556 if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3557 break;
3558
3559 if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3560 sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3561 ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3562 return -EPROTO;
3563 }
3564
3565 tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
3566 if (tim_len) {
3567 /* Exclude 4 byte guard length */
3568 tim_len -= 4;
3569 arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
3570 } else {
3571 arg->tim_info[i].tim_len = 0;
3572 }
3573
3574 arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3575 arg->tim_info[i].tim_bitmap =
3576 ev->bcn_info[i].tim_info.tim_bitmap;
3577 arg->tim_info[i].tim_changed =
3578 ev->bcn_info[i].tim_info.tim_changed;
3579 arg->tim_info[i].tim_num_ps_pending =
3580 ev->bcn_info[i].tim_info.tim_num_ps_pending;
3581
3582 /* 10.4 firmware doesn't have p2p support, so the notice of
3583 * absence info can be ignored for now.
3584 */
3585
3586 i++;
3587 }
3588
3589 return 0;
3590 }
3591
3592 static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
3593 {
3594 return WMI_TXBF_CONF_BEFORE_ASSOC;
3595 }
3596
3597 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
3598 {
3599 struct wmi_swba_ev_arg arg = {};
3600 u32 map;
3601 int i = -1;
3602 const struct wmi_tim_info_arg *tim_info;
3603 const struct wmi_p2p_noa_info *noa_info;
3604 struct ath10k_vif *arvif;
3605 struct sk_buff *bcn;
3606 dma_addr_t paddr;
3607 int ret, vdev_id = 0;
3608
3609 ret = ath10k_wmi_pull_swba(ar, skb, &arg);
3610 if (ret) {
3611 ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
3612 return;
3613 }
3614
3615 map = __le32_to_cpu(arg.vdev_map);
3616
3617 ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
3618 map);
3619
3620 for (; map; map >>= 1, vdev_id++) {
3621 if (!(map & 0x1))
3622 continue;
3623
3624 i++;
3625
3626 if (i >= WMI_MAX_AP_VDEV) {
3627 ath10k_warn(ar, "swba has corrupted vdev map\n");
3628 break;
3629 }
3630
3631 tim_info = &arg.tim_info[i];
3632 noa_info = arg.noa_info[i];
3633
3634 ath10k_dbg(ar, ATH10K_DBG_MGMT,
3635 "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
3636 i,
3637 __le32_to_cpu(tim_info->tim_len),
3638 __le32_to_cpu(tim_info->tim_mcast),
3639 __le32_to_cpu(tim_info->tim_changed),
3640 __le32_to_cpu(tim_info->tim_num_ps_pending),
3641 __le32_to_cpu(tim_info->tim_bitmap[3]),
3642 __le32_to_cpu(tim_info->tim_bitmap[2]),
3643 __le32_to_cpu(tim_info->tim_bitmap[1]),
3644 __le32_to_cpu(tim_info->tim_bitmap[0]));
3645
3646 /* TODO: Only the first 4 words of tim_bitmap are dumped.
3647 * Extend the debug code to dump the full tim_bitmap.
3648 */
3649
3650 arvif = ath10k_get_arvif(ar, vdev_id);
3651 if (arvif == NULL) {
3652 ath10k_warn(ar, "no vif for vdev_id %d found\n",
3653 vdev_id);
3654 continue;
3655 }
3656
3657 /* mac80211 would have already asked us to stop beaconing and
3658 * bring the vdev down, so continue in that case
3659 */
3660 if (!arvif->is_up)
3661 continue;
3662
3663 /* There are no completions for beacons so wait for next SWBA
3664 * before telling mac80211 to decrement CSA counter
3665 *
3666 * Once CSA counter is completed stop sending beacons until
3667 * actual channel switch is done
3668 */
3669 if (arvif->vif->csa_active &&
3670 ieee80211_csa_is_complete(arvif->vif)) {
3671 ieee80211_csa_finish(arvif->vif);
3672 continue;
3673 }
3674
3675 bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
3676 if (!bcn) {
3677 ath10k_warn(ar, "could not get mac80211 beacon\n");
3678 continue;
3679 }
3680
3681 ath10k_tx_h_seq_no(arvif->vif, bcn);
3682 ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
3683 ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
3684
3685 spin_lock_bh(&ar->data_lock);
3686
3687 if (arvif->beacon) {
3688 switch (arvif->beacon_state) {
3689 case ATH10K_BEACON_SENT:
3690 break;
3691 case ATH10K_BEACON_SCHEDULED:
3692 ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
3693 arvif->vdev_id);
3694 break;
3695 case ATH10K_BEACON_SENDING:
3696 ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
3697 arvif->vdev_id);
3698 dev_kfree_skb(bcn);
3699 goto skip;
3700 }
3701
3702 ath10k_mac_vif_beacon_free(arvif);
3703 }
3704
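		/* Either DMA-map the freshly built beacon or, when the vdev has
		 * a preallocated beacon buffer, copy the frame into it and reuse
		 * that buffer's DMA address.
		 */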
3705 if (!arvif->beacon_buf) {
3706 paddr = dma_map_single(arvif->ar->dev, bcn->data,
3707 bcn->len, DMA_TO_DEVICE);
3708 ret = dma_mapping_error(arvif->ar->dev, paddr);
3709 if (ret) {
3710 ath10k_warn(ar, "failed to map beacon: %d\n",
3711 ret);
3712 dev_kfree_skb_any(bcn);
3713 goto skip;
3714 }
3715
3716 ATH10K_SKB_CB(bcn)->paddr = paddr;
3717 } else {
3718 if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
3719 ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
3720 bcn->len, IEEE80211_MAX_FRAME_LEN);
3721 skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
3722 }
3723 memcpy(arvif->beacon_buf, bcn->data, bcn->len);
3724 ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
3725 }
3726
3727 arvif->beacon = bcn;
3728 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
3729
3730 trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
3731 trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
3732
3733 skip:
3734 spin_unlock_bh(&ar->data_lock);
3735 }
3736
3737 ath10k_wmi_tx_beacons_nowait(ar);
3738 }
3739
3740 void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
3741 {
3742 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
3743 }
3744
3745 static void ath10k_radar_detected(struct ath10k *ar)
3746 {
3747 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
3748 ATH10K_DFS_STAT_INC(ar, radar_detected);
3749
3750 /* Radar event reporting can be suppressed via the debugfs file
3751 * dfs_block_radar_events.
3752 */
3753 if (ar->dfs_block_radar_events)
3754 ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
3755 else
3756 ieee80211_radar_detected(ar->hw);
3757 }
3758
3759 static void ath10k_radar_confirmation_work(struct work_struct *work)
3760 {
3761 struct ath10k *ar = container_of(work, struct ath10k,
3762 radar_confirmation_work);
3763 struct ath10k_radar_found_info radar_info;
3764 int ret, time_left;
3765
3766 reinit_completion(&ar->wmi.radar_confirm);
3767
3768 spin_lock_bh(&ar->data_lock);
3769 memcpy(&radar_info, &ar->last_radar_info, sizeof(radar_info));
3770 spin_unlock_bh(&ar->data_lock);
3771
3772 ret = ath10k_wmi_report_radar_found(ar, &radar_info);
3773 if (ret) {
3774 ath10k_warn(ar, "failed to send radar found %d\n", ret);
3775 goto wait_complete;
3776 }
3777
3778 time_left = wait_for_completion_timeout(&ar->wmi.radar_confirm,
3779 ATH10K_WMI_DFS_CONF_TIMEOUT_HZ);
3780 if (time_left) {
3781 /* DFS Confirmation status event received and
3782 * necessary action completed.
3783 */
3784 goto wait_complete;
3785 } else {
3786 /* DFS confirmation event not received from FW. Considering this
3787 * as a real radar.
3788 */
3789 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
3790 "dfs confirmation not received from fw, considering as radar\n");
3791 goto radar_detected;
3792 }
3793
3794 radar_detected:
3795 ath10k_radar_detected(ar);
3796
3797 /* Reset state to allow sending confirmation on consecutive radar
3798 * detections, unless radar confirmation is disabled/stopped.
3799 */
3800 wait_complete:
3801 spin_lock_bh(&ar->data_lock);
3802 if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_STOPPED)
3803 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
3804 spin_unlock_bh(&ar->data_lock);
3805 }
3806
3807 static void ath10k_dfs_radar_report(struct ath10k *ar,
3808 struct wmi_phyerr_ev_arg *phyerr,
3809 const struct phyerr_radar_report *rr,
3810 u64 tsf)
3811 {
3812 u32 reg0, reg1, tsf32l;
3813 struct ieee80211_channel *ch;
3814 struct pulse_event pe;
3815 struct radar_detector_specs rs;
3816 u64 tsf64;
3817 u8 rssi, width;
3818 struct ath10k_radar_found_info *radar_info;
3819
3820 reg0 = __le32_to_cpu(rr->reg0);
3821 reg1 = __le32_to_cpu(rr->reg1);
3822
3823 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
3824 "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
3825 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
3826 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
3827 MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
3828 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
3829 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
3830 "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
3831 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
3832 MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
3833 MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
3834 MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
3835 MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
3836 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
3837 "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
3838 MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
3839 MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
3840
3841 if (!ar->dfs_detector)
3842 return;
3843
3844 spin_lock_bh(&ar->data_lock);
3845 ch = ar->rx_channel;
3846
3847 /* fetch target operating channel during channel change */
3848 if (!ch)
3849 ch = ar->tgt_oper_chan;
3850
3851 spin_unlock_bh(&ar->data_lock);
3852
3853 if (!ch) {
3854 ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
3855 goto radar_detected;
3856 }
3857
3858 /* report event to DFS pattern detector */
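	/* phyerr->tsf_timestamp carries only the low 32 bits; splice it into
	 * the 64-bit TSF taken from the event header.
	 */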
3859 tsf32l = phyerr->tsf_timestamp;
3860 tsf64 = tsf & (~0xFFFFFFFFULL);
3861 tsf64 |= tsf32l;
3862
3863 width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
3864 rssi = phyerr->rssi_combined;
3865
3866 /* the hardware stores this as an 8-bit signed value;
3867 * clamp it to zero if it is negative
3868 */
3869 if (rssi & 0x80)
3870 rssi = 0;
3871
3872 pe.ts = tsf64;
3873 pe.freq = ch->center_freq;
3874 pe.width = width;
3875 pe.rssi = rssi;
3876 pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
3877 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
3878 "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
3879 pe.freq, pe.width, pe.rssi, pe.ts);
3880
3881 ATH10K_DFS_STAT_INC(ar, pulses_detected);
3882
3883 if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, &rs)) {
3884 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
3885 "dfs no pulse pattern detected, yet\n");
3886 return;
3887 }
3888
3889 if ((test_bit(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, ar->wmi.svc_map)) &&
3890 ar->dfs_detector->region == NL80211_DFS_FCC) {
3891 /* Consecutive radar indications need not be
3892 * sent to the firmware until we get confirmation
3893 * for the previously detected radar.
3894 */
3895 spin_lock_bh(&ar->data_lock);
3896 if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_IDLE) {
3897 spin_unlock_bh(&ar->data_lock);
3898 return;
3899 }
3900 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_INPROGRESS;
3901 radar_info = &ar->last_radar_info;
3902
3903 radar_info->pri_min = rs.pri_min;
3904 radar_info->pri_max = rs.pri_max;
3905 radar_info->width_min = rs.width_min;
3906 radar_info->width_max = rs.width_max;
3907 /* TODO: Find sidx_min and sidx_max */
3908 radar_info->sidx_min = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
3909 radar_info->sidx_max = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
3910
3911 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
3912 "sending wmi radar found cmd pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
3913 radar_info->pri_min, radar_info->pri_max,
3914 radar_info->width_min, radar_info->width_max,
3915 radar_info->sidx_min, radar_info->sidx_max);
3916 ieee80211_queue_work(ar->hw, &ar->radar_confirmation_work);
3917 spin_unlock_bh(&ar->data_lock);
3918 return;
3919 }
3920
3921 radar_detected:
3922 ath10k_radar_detected(ar);
3923 }
3924
3925 static int ath10k_dfs_fft_report(struct ath10k *ar,
3926 struct wmi_phyerr_ev_arg *phyerr,
3927 const struct phyerr_fft_report *fftr,
3928 u64 tsf)
3929 {
3930 u32 reg0, reg1;
3931 u8 rssi, peak_mag;
3932
3933 reg0 = __le32_to_cpu(fftr->reg0);
3934 reg1 = __le32_to_cpu(fftr->reg1);
3935 rssi = phyerr->rssi_combined;
3936
3937 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
3938 "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
3939 MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
3940 MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
3941 MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
3942 MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
3943 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
3944 "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
3945 MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
3946 MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
3947 MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
3948 MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
3949
3950 peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
3951
3952 /* false event detection: discard pulses whose RSSI and FFT peak magnitude indicate a likely spurious report */
3953 if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
3954 peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
3955 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
3956 ATH10K_DFS_STAT_INC(ar, pulses_discarded);
3957 return -EINVAL;
3958 }
3959
3960 return 0;
3961 }
3962
3963 void ath10k_wmi_event_dfs(struct ath10k *ar,
3964 struct wmi_phyerr_ev_arg *phyerr,
3965 u64 tsf)
3966 {
3967 int buf_len, tlv_len, res, i = 0;
3968 const struct phyerr_tlv *tlv;
3969 const struct phyerr_radar_report *rr;
3970 const struct phyerr_fft_report *fftr;
3971 const u8 *tlv_buf;
3972
3973 buf_len = phyerr->buf_len;
3974 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
3975 "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
3976 phyerr->phy_err_code, phyerr->rssi_combined,
3977 phyerr->tsf_timestamp, tsf, buf_len);
3978
3979 /* Skip event if DFS disabled */
3980 if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED))
3981 return;
3982
3983 ATH10K_DFS_STAT_INC(ar, pulses_total);
3984
3985 while (i < buf_len) {
3986 if (i + sizeof(*tlv) > buf_len) {
3987 ath10k_warn(ar, "too short buf for tlv header (%d)\n",
3988 i);
3989 return;
3990 }
3991
3992 tlv = (struct phyerr_tlv *)&phyerr->buf[i];
3993 tlv_len = __le16_to_cpu(tlv->len);
3994 tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
3995 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
3996 "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
3997 tlv_len, tlv->tag, tlv->sig);
3998
3999 switch (tlv->tag) {
4000 case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
4001 if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
4002 ath10k_warn(ar, "too short radar pulse summary (%d)\n",
4003 i);
4004 return;
4005 }
4006
4007 rr = (struct phyerr_radar_report *)tlv_buf;
4008 ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
4009 break;
4010 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
4011 if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
4012 ath10k_warn(ar, "too short fft report (%d)\n",
4013 i);
4014 return;
4015 }
4016
4017 fftr = (struct phyerr_fft_report *)tlv_buf;
4018 res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
4019 if (res)
4020 return;
4021 break;
4022 }
4023
4024 i += sizeof(*tlv) + tlv_len;
4025 }
4026 }
4027
4028 void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
4029 struct wmi_phyerr_ev_arg *phyerr,
4030 u64 tsf)
4031 {
4032 int buf_len, tlv_len, res, i = 0;
4033 struct phyerr_tlv *tlv;
4034 const void *tlv_buf;
4035 const struct phyerr_fft_report *fftr;
4036 size_t fftr_len;
4037
4038 buf_len = phyerr->buf_len;
4039
4040 while (i < buf_len) {
4041 if (i + sizeof(*tlv) > buf_len) {
4042 ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
4043 i);
4044 return;
4045 }
4046
4047 tlv = (struct phyerr_tlv *)&phyerr->buf[i];
4048 tlv_len = __le16_to_cpu(tlv->len);
4049 tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
4050
4051 if (i + sizeof(*tlv) + tlv_len > buf_len) {
4052 ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
4053 i);
4054 return;
4055 }
4056
4057 switch (tlv->tag) {
4058 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
4059 if (sizeof(*fftr) > tlv_len) {
4060 ath10k_warn(ar, "failed to parse fft report at byte %d\n",
4061 i);
4062 return;
4063 }
4064
4065 fftr_len = tlv_len - sizeof(*fftr);
4066 fftr = tlv_buf;
4067 res = ath10k_spectral_process_fft(ar, phyerr,
4068 fftr, fftr_len,
4069 tsf);
4070 if (res < 0) {
4071 ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
4072 res);
4073 return;
4074 }
4075 break;
4076 }
4077
4078 i += sizeof(*tlv) + tlv_len;
4079 }
4080 }
4081
4082 static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar,
4083 struct sk_buff *skb,
4084 struct wmi_phyerr_hdr_arg *arg)
4085 {
4086 struct wmi_phyerr_event *ev = (void *)skb->data;
4087
4088 if (skb->len < sizeof(*ev))
4089 return -EPROTO;
4090
4091 arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
4092 arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
4093 arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
4094 arg->buf_len = skb->len - sizeof(*ev);
4095 arg->phyerrs = ev->phyerrs;
4096
4097 return 0;
4098 }
4099
4100 static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar,
4101 struct sk_buff *skb,
4102 struct wmi_phyerr_hdr_arg *arg)
4103 {
4104 struct wmi_10_4_phyerr_event *ev = (void *)skb->data;
4105
4106 if (skb->len < sizeof(*ev))
4107 return -EPROTO;
4108
4109 /* 10.4 firmware always reports only one phyerr */
4110 arg->num_phyerrs = 1;
4111
4112 arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
4113 arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
4114 arg->buf_len = skb->len;
4115 arg->phyerrs = skb->data;
4116
4117 return 0;
4118 }
4119
4120 int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar,
4121 const void *phyerr_buf,
4122 int left_len,
4123 struct wmi_phyerr_ev_arg *arg)
4124 {
4125 const struct wmi_phyerr *phyerr = phyerr_buf;
4126 int i;
4127
4128 if (left_len < sizeof(*phyerr)) {
4129 ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
4130 left_len, sizeof(*phyerr));
4131 return -EINVAL;
4132 }
4133
4134 arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
4135 arg->freq1 = __le16_to_cpu(phyerr->freq1);
4136 arg->freq2 = __le16_to_cpu(phyerr->freq2);
4137 arg->rssi_combined = phyerr->rssi_combined;
4138 arg->chan_width_mhz = phyerr->chan_width_mhz;
4139 arg->buf_len = __le32_to_cpu(phyerr->buf_len);
4140 arg->buf = phyerr->buf;
4141 arg->hdr_len = sizeof(*phyerr);
4142
4143 for (i = 0; i < 4; i++)
4144 arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
4145
4146 switch (phyerr->phy_err_code) {
4147 case PHY_ERROR_GEN_SPECTRAL_SCAN:
4148 arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
4149 break;
4150 case PHY_ERROR_GEN_FALSE_RADAR_EXT:
4151 arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT;
4152 break;
4153 case PHY_ERROR_GEN_RADAR:
4154 arg->phy_err_code = PHY_ERROR_RADAR;
4155 break;
4156 default:
4157 arg->phy_err_code = PHY_ERROR_UNKNOWN;
4158 break;
4159 }
4160
4161 return 0;
4162 }
4163
4164 static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar,
4165 const void *phyerr_buf,
4166 int left_len,
4167 struct wmi_phyerr_ev_arg *arg)
4168 {
4169 const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf;
4170 u32 phy_err_mask;
4171 int i;
4172
4173 if (left_len < sizeof(*phyerr)) {
4174 ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
4175 left_len, sizeof(*phyerr));
4176 return -EINVAL;
4177 }
4178
4179 arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
4180 arg->freq1 = __le16_to_cpu(phyerr->freq1);
4181 arg->freq2 = __le16_to_cpu(phyerr->freq2);
4182 arg->rssi_combined = phyerr->rssi_combined;
4183 arg->chan_width_mhz = phyerr->chan_width_mhz;
4184 arg->buf_len = __le32_to_cpu(phyerr->buf_len);
4185 arg->buf = phyerr->buf;
4186 arg->hdr_len = sizeof(*phyerr);
4187
4188 for (i = 0; i < 4; i++)
4189 arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
4190
4191 phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]);
4192
4193 if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK)
4194 arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
4195 else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK)
4196 arg->phy_err_code = PHY_ERROR_RADAR;
4197 else
4198 arg->phy_err_code = PHY_ERROR_UNKNOWN;
4199
4200 return 0;
4201 }
4202
4203 void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
4204 {
4205 struct wmi_phyerr_hdr_arg hdr_arg = {};
4206 struct wmi_phyerr_ev_arg phyerr_arg = {};
4207 const void *phyerr;
4208 u32 count, i, buf_len, phy_err_code;
4209 u64 tsf;
4210 int left_len, ret;
4211
4212 ATH10K_DFS_STAT_INC(ar, phy_errors);
4213
4214 ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg);
4215 if (ret) {
4216 ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret);
4217 return;
4218 }
4219
4220 /* Check number of included events */
4221 count = hdr_arg.num_phyerrs;
4222
4223 left_len = hdr_arg.buf_len;
4224
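	/* Reassemble the 64-bit TSF from the upper and lower 32-bit words
	 * reported in the event header.
	 */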
4225 tsf = hdr_arg.tsf_u32;
4226 tsf <<= 32;
4227 tsf |= hdr_arg.tsf_l32;
4228
4229 ath10k_dbg(ar, ATH10K_DBG_WMI,
4230 "wmi event phyerr count %d tsf64 0x%llX\n",
4231 count, tsf);
4232
4233 phyerr = hdr_arg.phyerrs;
4234 for (i = 0; i < count; i++) {
4235 ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg);
4236 if (ret) {
4237 ath10k_warn(ar, "failed to parse phyerr event (%d)\n",
4238 i);
4239 return;
4240 }
4241
4242 left_len -= phyerr_arg.hdr_len;
4243 buf_len = phyerr_arg.buf_len;
4244 phy_err_code = phyerr_arg.phy_err_code;
4245
4246 if (left_len < buf_len) {
4247 ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
4248 return;
4249 }
4250
4251 left_len -= buf_len;
4252
4253 switch (phy_err_code) {
4254 case PHY_ERROR_RADAR:
4255 ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
4256 break;
4257 case PHY_ERROR_SPECTRAL_SCAN:
4258 ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
4259 break;
4260 case PHY_ERROR_FALSE_RADAR_EXT:
4261 ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
4262 ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
4263 break;
4264 default:
4265 break;
4266 }
4267
4268 phyerr = phyerr + phyerr_arg.hdr_len + buf_len;
4269 }
4270 }
4271
4272 static int
4273 ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k *ar, struct sk_buff *skb,
4274 struct wmi_dfs_status_ev_arg *arg)
4275 {
4276 struct wmi_dfs_status_ev_arg *ev = (void *)skb->data;
4277
4278 if (skb->len < sizeof(*ev))
4279 return -EPROTO;
4280
4281 arg->status = ev->status;
4282
4283 return 0;
4284 }
4285
4286 static void
4287 ath10k_wmi_event_dfs_status_check(struct ath10k *ar, struct sk_buff *skb)
4288 {
4289 struct wmi_dfs_status_ev_arg status_arg = {};
4290 int ret;
4291
4292 ret = ath10k_wmi_pull_dfs_status(ar, skb, &status_arg);
4293
4294 if (ret) {
4295 ath10k_warn(ar, "failed to parse dfs status event: %d\n", ret);
4296 return;
4297 }
4298
4299 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4300 "dfs status event received from fw: %d\n",
4301 status_arg.status);
4302
4303 	/* Even in the case of a radar detection failure we follow the same
4304 	 * behaviour as if radar had been detected, i.e. switch to a
4305 	 * different channel.
4306 	 */
4307 if (status_arg.status == WMI_HW_RADAR_DETECTED ||
4308 status_arg.status == WMI_RADAR_DETECTION_FAIL)
4309 ath10k_radar_detected(ar);
4310 complete(&ar->wmi.radar_confirm);
4311 }
4312
4313 void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
4314 {
4315 struct wmi_roam_ev_arg arg = {};
4316 int ret;
4317 u32 vdev_id;
4318 u32 reason;
4319 s32 rssi;
4320
4321 ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
4322 if (ret) {
4323 ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
4324 return;
4325 }
4326
4327 vdev_id = __le32_to_cpu(arg.vdev_id);
4328 reason = __le32_to_cpu(arg.reason);
4329 rssi = __le32_to_cpu(arg.rssi);
4330 rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
4331
4332 ath10k_dbg(ar, ATH10K_DBG_WMI,
4333 "wmi roam event vdev %u reason 0x%08x rssi %d\n",
4334 vdev_id, reason, rssi);
4335
4336 if (reason >= WMI_ROAM_REASON_MAX)
4337 ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
4338 reason, vdev_id);
4339
4340 switch (reason) {
4341 case WMI_ROAM_REASON_BEACON_MISS:
4342 ath10k_mac_handle_beacon_miss(ar, vdev_id);
4343 break;
4344 case WMI_ROAM_REASON_BETTER_AP:
4345 case WMI_ROAM_REASON_LOW_RSSI:
4346 case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
4347 case WMI_ROAM_REASON_HO_FAILED:
4348 ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
4349 reason, vdev_id);
4350 break;
4351 }
4352 }
4353
4354 void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
4355 {
4356 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
4357 }
4358
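/* Copy the firmware debug string into a bounded, NUL-terminated buffer,
 * replacing non-printable characters with '.' before logging it.
 */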
4359 void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
4360 {
4361 char buf[101], c;
4362 int i;
4363
4364 for (i = 0; i < sizeof(buf) - 1; i++) {
4365 if (i >= skb->len)
4366 break;
4367
4368 c = skb->data[i];
4369
4370 if (c == '\0')
4371 break;
4372
4373 if (isascii(c) && isprint(c))
4374 buf[i] = c;
4375 else
4376 buf[i] = '.';
4377 }
4378
4379 if (i == sizeof(buf) - 1)
4380 ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
4381
4382 /* for some reason the debug prints end with \n, remove that */
4383 	if (i && skb->data[i - 1] == '\n')
4384 i--;
4385
4386 /* the last byte is always reserved for the null character */
4387 buf[i] = '\0';
4388
4389 ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
4390 }
4391
4392 void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
4393 {
4394 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
4395 }
4396
4397 void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
4398 {
4399 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
4400 }
4401
4402 void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
4403 struct sk_buff *skb)
4404 {
4405 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
4406 }
4407
4408 void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
4409 struct sk_buff *skb)
4410 {
4411 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
4412 }
4413
4414 void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
4415 {
4416 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
4417 }
4418
4419 void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
4420 {
4421 struct wmi_wow_ev_arg ev = {};
4422 int ret;
4423
4424 complete(&ar->wow.wakeup_completed);
4425
4426 ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
4427 if (ret) {
4428 ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
4429 return;
4430 }
4431
4432 ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
4433 wow_reason(ev.wake_reason));
4434 }
4435
4436 void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
4437 {
4438 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
4439 }
4440
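/* Return the transmit power for a single rate entry, clamped against the
 * per-chain regulatory limit and, for multi-chain STBC/TXBF/CDD cases,
 * the corresponding array-gain limited power tables.
 */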
4441 static u8 ath10k_tpc_config_get_rate(struct ath10k *ar,
4442 struct wmi_pdev_tpc_config_event *ev,
4443 u32 rate_idx, u32 num_chains,
4444 u32 rate_code, u8 type)
4445 {
4446 u8 tpc, num_streams, preamble, ch, stm_idx;
4447
4448 num_streams = ATH10K_HW_NSS(rate_code);
4449 preamble = ATH10K_HW_PREAMBLE(rate_code);
4450 ch = num_chains - 1;
4451
4452 tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]);
4453
4454 if (__le32_to_cpu(ev->num_tx_chain) <= 1)
4455 goto out;
4456
4457 if (preamble == WMI_RATE_PREAMBLE_CCK)
4458 goto out;
4459
4460 stm_idx = num_streams - 1;
4461 if (num_chains <= num_streams)
4462 goto out;
4463
4464 switch (type) {
4465 case WMI_TPC_TABLE_TYPE_STBC:
4466 tpc = min_t(u8, tpc,
4467 ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]);
4468 break;
4469 case WMI_TPC_TABLE_TYPE_TXBF:
4470 tpc = min_t(u8, tpc,
4471 ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]);
4472 break;
4473 case WMI_TPC_TABLE_TYPE_CDD:
4474 tpc = min_t(u8, tpc,
4475 ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]);
4476 break;
4477 default:
4478 ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type);
4479 tpc = 0;
4480 break;
4481 }
4482
4483 out:
4484 return tpc;
4485 }
4486
4487 static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
4488 struct wmi_pdev_tpc_config_event *ev,
4489 struct ath10k_tpc_stats *tpc_stats,
4490 u8 *rate_code, u16 *pream_table, u8 type)
4491 {
4492 u32 i, j, pream_idx, flags;
4493 u8 tpc[WMI_TPC_TX_N_CHAIN];
4494 char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
4495 char buff[WMI_TPC_BUF_SIZE];
4496
4497 flags = __le32_to_cpu(ev->flags);
4498
4499 switch (type) {
4500 case WMI_TPC_TABLE_TYPE_CDD:
4501 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
4502 ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
4503 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4504 return;
4505 }
4506 break;
4507 case WMI_TPC_TABLE_TYPE_STBC:
4508 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
4509 ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
4510 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4511 return;
4512 }
4513 break;
4514 case WMI_TPC_TABLE_TYPE_TXBF:
4515 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
4516 ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
4517 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4518 return;
4519 }
4520 break;
4521 default:
4522 ath10k_dbg(ar, ATH10K_DBG_WMI,
4523 "invalid table type in wmi tpc event: %d\n", type);
4524 return;
4525 }
4526
4527 pream_idx = 0;
4528 for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
4529 memset(tpc_value, 0, sizeof(tpc_value));
4530 memset(buff, 0, sizeof(buff));
4531 if (i == pream_table[pream_idx])
4532 pream_idx++;
4533
4534 for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
4535 if (j >= __le32_to_cpu(ev->num_tx_chain))
4536 break;
4537
4538 tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
4539 rate_code[i],
4540 type);
4541 snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
4542 strlcat(tpc_value, buff, sizeof(tpc_value));
4543 }
4544 tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
4545 tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
4546 memcpy(tpc_stats->tpc_table[type].tpc_value[i],
4547 tpc_value, sizeof(tpc_value));
4548 }
4549 }
4550
4551 void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
4552 u32 num_tx_chain)
4553 {
4554 u32 i, j, pream_idx;
4555 u8 rate_idx;
4556
4557 /* Create the rate code table based on the chains supported */
4558 rate_idx = 0;
4559 pream_idx = 0;
4560
4561 /* Fill CCK rate code */
4562 for (i = 0; i < 4; i++) {
4563 rate_code[rate_idx] =
4564 ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK);
4565 rate_idx++;
4566 }
4567 pream_table[pream_idx] = rate_idx;
4568 pream_idx++;
4569
4570 /* Fill OFDM rate code */
4571 for (i = 0; i < 8; i++) {
4572 rate_code[rate_idx] =
4573 ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM);
4574 rate_idx++;
4575 }
4576 pream_table[pream_idx] = rate_idx;
4577 pream_idx++;
4578
4579 /* Fill HT20 rate code */
4580 for (i = 0; i < num_tx_chain; i++) {
4581 for (j = 0; j < 8; j++) {
4582 rate_code[rate_idx] =
4583 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
4584 rate_idx++;
4585 }
4586 }
4587 pream_table[pream_idx] = rate_idx;
4588 pream_idx++;
4589
4590 /* Fill HT40 rate code */
4591 for (i = 0; i < num_tx_chain; i++) {
4592 for (j = 0; j < 8; j++) {
4593 rate_code[rate_idx] =
4594 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
4595 rate_idx++;
4596 }
4597 }
4598 pream_table[pream_idx] = rate_idx;
4599 pream_idx++;
4600
4601 /* Fill VHT20 rate code */
4602 for (i = 0; i < num_tx_chain; i++) {
4603 for (j = 0; j < 10; j++) {
4604 rate_code[rate_idx] =
4605 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4606 rate_idx++;
4607 }
4608 }
4609 pream_table[pream_idx] = rate_idx;
4610 pream_idx++;
4611
4612 /* Fill VHT40 rate code */
4613 for (i = 0; i < num_tx_chain; i++) {
4614 for (j = 0; j < 10; j++) {
4615 rate_code[rate_idx] =
4616 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4617 rate_idx++;
4618 }
4619 }
4620 pream_table[pream_idx] = rate_idx;
4621 pream_idx++;
4622
4623 /* Fill VHT80 rate code */
4624 for (i = 0; i < num_tx_chain; i++) {
4625 for (j = 0; j < 10; j++) {
4626 rate_code[rate_idx] =
4627 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4628 rate_idx++;
4629 }
4630 }
4631 pream_table[pream_idx] = rate_idx;
4632 pream_idx++;
4633
4634 rate_code[rate_idx++] =
4635 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
4636 rate_code[rate_idx++] =
4637 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4638 rate_code[rate_idx++] =
4639 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
4640 rate_code[rate_idx++] =
4641 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4642 rate_code[rate_idx++] =
4643 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4644
4645 pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END;
4646 }
4647
4648 void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
4649 {
4650 u32 num_tx_chain;
4651 u8 rate_code[WMI_TPC_RATE_MAX];
4652 u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
4653 struct wmi_pdev_tpc_config_event *ev;
4654 struct ath10k_tpc_stats *tpc_stats;
4655
4656 ev = (struct wmi_pdev_tpc_config_event *)skb->data;
4657
4658 num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
4659
4660 if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
4661 ath10k_warn(ar, "number of tx chain is %d greater than TPC configured tx chain %d\n",
4662 num_tx_chain, WMI_TPC_TX_N_CHAIN);
4663 return;
4664 }
4665
4666 tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
4667 if (!tpc_stats)
4668 return;
4669
4670 ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
4671 num_tx_chain);
4672
4673 tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
4674 tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
4675 tpc_stats->ctl = __le32_to_cpu(ev->ctl);
4676 tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
4677 tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
4678 tpc_stats->twice_antenna_reduction =
4679 __le32_to_cpu(ev->twice_antenna_reduction);
4680 tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
4681 tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
4682 tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
4683 tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
4684
4685 ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4686 rate_code, pream_table,
4687 WMI_TPC_TABLE_TYPE_CDD);
4688 ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4689 rate_code, pream_table,
4690 WMI_TPC_TABLE_TYPE_STBC);
4691 ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4692 rate_code, pream_table,
4693 WMI_TPC_TABLE_TYPE_TXBF);
4694
4695 ath10k_debug_tpc_stats_process(ar, tpc_stats);
4696
4697 ath10k_dbg(ar, ATH10K_DBG_WMI,
4698 "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n",
4699 __le32_to_cpu(ev->chan_freq),
4700 __le32_to_cpu(ev->phy_mode),
4701 __le32_to_cpu(ev->ctl),
4702 __le32_to_cpu(ev->reg_domain),
4703 a_sle32_to_cpu(ev->twice_antenna_gain),
4704 __le32_to_cpu(ev->twice_antenna_reduction),
4705 __le32_to_cpu(ev->power_limit),
4706 __le32_to_cpu(ev->twice_max_rd_power) / 2,
4707 __le32_to_cpu(ev->num_tx_chain),
4708 __le32_to_cpu(ev->rate_max));
4709 }
4710
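/* As ath10k_tpc_config_get_rate() above, but for the final TPC table: the
 * power is additionally clamped against the CTL power table, with the row
 * selected from the preamble index and the reported channel frequency.
 */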
4711 static u8
4712 ath10k_wmi_tpc_final_get_rate(struct ath10k *ar,
4713 struct wmi_pdev_tpc_final_table_event *ev,
4714 u32 rate_idx, u32 num_chains,
4715 u32 rate_code, u8 type, u32 pream_idx)
4716 {
4717 u8 tpc, num_streams, preamble, ch, stm_idx;
4718 s8 pow_agcdd, pow_agstbc, pow_agtxbf;
4719 int pream;
4720
4721 num_streams = ATH10K_HW_NSS(rate_code);
4722 preamble = ATH10K_HW_PREAMBLE(rate_code);
4723 ch = num_chains - 1;
4724 stm_idx = num_streams - 1;
4725 pream = -1;
4726
4727 if (__le32_to_cpu(ev->chan_freq) <= 2483) {
4728 switch (pream_idx) {
4729 case WMI_TPC_PREAM_2GHZ_CCK:
4730 pream = 0;
4731 break;
4732 case WMI_TPC_PREAM_2GHZ_OFDM:
4733 pream = 1;
4734 break;
4735 case WMI_TPC_PREAM_2GHZ_HT20:
4736 case WMI_TPC_PREAM_2GHZ_VHT20:
4737 pream = 2;
4738 break;
4739 case WMI_TPC_PREAM_2GHZ_HT40:
4740 case WMI_TPC_PREAM_2GHZ_VHT40:
4741 pream = 3;
4742 break;
4743 case WMI_TPC_PREAM_2GHZ_VHT80:
4744 pream = 4;
4745 break;
4746 default:
4747 pream = -1;
4748 break;
4749 }
4750 }
4751
4752 if (__le32_to_cpu(ev->chan_freq) >= 5180) {
4753 switch (pream_idx) {
4754 case WMI_TPC_PREAM_5GHZ_OFDM:
4755 pream = 0;
4756 break;
4757 case WMI_TPC_PREAM_5GHZ_HT20:
4758 case WMI_TPC_PREAM_5GHZ_VHT20:
4759 pream = 1;
4760 break;
4761 case WMI_TPC_PREAM_5GHZ_HT40:
4762 case WMI_TPC_PREAM_5GHZ_VHT40:
4763 pream = 2;
4764 break;
4765 case WMI_TPC_PREAM_5GHZ_VHT80:
4766 pream = 3;
4767 break;
4768 case WMI_TPC_PREAM_5GHZ_HTCUP:
4769 pream = 4;
4770 break;
4771 default:
4772 pream = -1;
4773 break;
4774 }
4775 }
4776 
	/* pream stays -1 for preamble indexes not mapped above; bail out
	 * rather than using it as a negative table index.
	 */
	if (pream < 0) {
		ath10k_warn(ar, "unknown wmi tpc final preamble index %u\n",
			    pream_idx);
		return 0;
	}
4777 	if (pream == 4)
4778 		tpc = min_t(u8, ev->rates_array[rate_idx],
4779 			    ev->max_reg_allow_pow[ch]);
4780 	else
4781 		tpc = min_t(u8, min_t(u8, ev->rates_array[rate_idx],
4782 				      ev->max_reg_allow_pow[ch]),
4783 			    ev->ctl_power_table[0][pream][stm_idx]);
4784
4785 if (__le32_to_cpu(ev->num_tx_chain) <= 1)
4786 goto out;
4787
4788 if (preamble == WMI_RATE_PREAMBLE_CCK)
4789 goto out;
4790
4791 if (num_chains <= num_streams)
4792 goto out;
4793
4794 switch (type) {
4795 case WMI_TPC_TABLE_TYPE_STBC:
4796 pow_agstbc = ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx];
4797 if (pream == 4)
4798 tpc = min_t(u8, tpc, pow_agstbc);
4799 else
4800 tpc = min_t(u8, min_t(u8, tpc, pow_agstbc),
4801 ev->ctl_power_table[0][pream][stm_idx]);
4802 break;
4803 case WMI_TPC_TABLE_TYPE_TXBF:
4804 pow_agtxbf = ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx];
4805 if (pream == 4)
4806 tpc = min_t(u8, tpc, pow_agtxbf);
4807 else
4808 tpc = min_t(u8, min_t(u8, tpc, pow_agtxbf),
4809 ev->ctl_power_table[1][pream][stm_idx]);
4810 break;
4811 case WMI_TPC_TABLE_TYPE_CDD:
4812 pow_agcdd = ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx];
4813 if (pream == 4)
4814 tpc = min_t(u8, tpc, pow_agcdd);
4815 else
4816 tpc = min_t(u8, min_t(u8, tpc, pow_agcdd),
4817 ev->ctl_power_table[0][pream][stm_idx]);
4818 break;
4819 default:
4820 ath10k_warn(ar, "unknown wmi tpc final table type: %d\n", type);
4821 tpc = 0;
4822 break;
4823 }
4824
4825 out:
4826 return tpc;
4827 }
4828
4829 static void
4830 ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
4831 struct wmi_pdev_tpc_final_table_event *ev,
4832 struct ath10k_tpc_stats_final *tpc_stats,
4833 u8 *rate_code, u16 *pream_table, u8 type)
4834 {
4835 u32 i, j, pream_idx, flags;
4836 u8 tpc[WMI_TPC_TX_N_CHAIN];
4837 char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
4838 char buff[WMI_TPC_BUF_SIZE];
4839
4840 flags = __le32_to_cpu(ev->flags);
4841
4842 switch (type) {
4843 case WMI_TPC_TABLE_TYPE_CDD:
4844 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
4845 ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
4846 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4847 return;
4848 }
4849 break;
4850 case WMI_TPC_TABLE_TYPE_STBC:
4851 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
4852 ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
4853 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4854 return;
4855 }
4856 break;
4857 case WMI_TPC_TABLE_TYPE_TXBF:
4858 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
4859 ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
4860 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4861 return;
4862 }
4863 break;
4864 default:
4865 ath10k_dbg(ar, ATH10K_DBG_WMI,
4866 "invalid table type in wmi tpc event: %d\n", type);
4867 return;
4868 }
4869
4870 pream_idx = 0;
4871 for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
4872 memset(tpc_value, 0, sizeof(tpc_value));
4873 memset(buff, 0, sizeof(buff));
4874 if (i == pream_table[pream_idx])
4875 pream_idx++;
4876
4877 for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
4878 if (j >= __le32_to_cpu(ev->num_tx_chain))
4879 break;
4880
4881 tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
4882 rate_code[i],
4883 type, pream_idx);
4884 snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
4885 strlcat(tpc_value, buff, sizeof(tpc_value));
4886 }
4887 tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx;
4888 tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i];
4889 memcpy(tpc_stats->tpc_table_final[type].tpc_value[i],
4890 tpc_value, sizeof(tpc_value));
4891 }
4892 }
4893
4894 void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
4895 {
4896 u32 num_tx_chain;
4897 u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
4898 u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
4899 struct wmi_pdev_tpc_final_table_event *ev;
4900 struct ath10k_tpc_stats_final *tpc_stats;
4901
4902 ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
4903
4904 tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
4905 if (!tpc_stats)
4906 return;
4907
4908 num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
4909
4910 ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
4911 num_tx_chain);
4912
4913 tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
4914 tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
4915 tpc_stats->ctl = __le32_to_cpu(ev->ctl);
4916 tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
4917 tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
4918 tpc_stats->twice_antenna_reduction =
4919 __le32_to_cpu(ev->twice_antenna_reduction);
4920 tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
4921 tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
4922 tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
4923 tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
4924
4925 ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
4926 rate_code, pream_table,
4927 WMI_TPC_TABLE_TYPE_CDD);
4928 ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
4929 rate_code, pream_table,
4930 WMI_TPC_TABLE_TYPE_STBC);
4931 ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
4932 rate_code, pream_table,
4933 WMI_TPC_TABLE_TYPE_TXBF);
4934
4935 ath10k_debug_tpc_stats_final_process(ar, tpc_stats);
4936
4937 ath10k_dbg(ar, ATH10K_DBG_WMI,
4938 "wmi event tpc final table channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n",
4939 __le32_to_cpu(ev->chan_freq),
4940 __le32_to_cpu(ev->phy_mode),
4941 __le32_to_cpu(ev->ctl),
4942 __le32_to_cpu(ev->reg_domain),
4943 a_sle32_to_cpu(ev->twice_antenna_gain),
4944 __le32_to_cpu(ev->twice_antenna_reduction),
4945 __le32_to_cpu(ev->power_limit),
4946 __le32_to_cpu(ev->twice_max_rd_power) / 2,
4947 __le32_to_cpu(ev->num_tx_chain),
4948 __le32_to_cpu(ev->rate_max));
4949 }
4950
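/* Handle a firmware TDLS peer event. A teardown request from firmware is
 * translated into an NL80211_TDLS_TEARDOWN operation request towards
 * mac80211 with a reason code derived from the firmware's peer_reason.
 */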
4951 static void
4952 ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
4953 {
4954 struct wmi_tdls_peer_event *ev;
4955 struct ath10k_peer *peer;
4956 struct ath10k_vif *arvif;
4957 int vdev_id;
4958 int peer_status;
4959 int peer_reason;
4960 u8 reason;
4961
4962 if (skb->len < sizeof(*ev)) {
4963 ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n",
4964 skb->len);
4965 return;
4966 }
4967
4968 ev = (struct wmi_tdls_peer_event *)skb->data;
4969 vdev_id = __le32_to_cpu(ev->vdev_id);
4970 peer_status = __le32_to_cpu(ev->peer_status);
4971 peer_reason = __le32_to_cpu(ev->peer_reason);
4972
4973 spin_lock_bh(&ar->data_lock);
4974 peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
4975 spin_unlock_bh(&ar->data_lock);
4976
4977 if (!peer) {
4978 ath10k_warn(ar, "failed to find peer entry for %pM\n",
4979 ev->peer_macaddr.addr);
4980 return;
4981 }
4982
4983 switch (peer_status) {
4984 case WMI_TDLS_SHOULD_TEARDOWN:
4985 switch (peer_reason) {
4986 case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
4987 case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE:
4988 case WMI_TDLS_TEARDOWN_REASON_RSSI:
4989 reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE;
4990 break;
4991 default:
4992 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
4993 break;
4994 }
4995
4996 arvif = ath10k_get_arvif(ar, vdev_id);
4997 if (!arvif) {
4998 ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n",
4999 vdev_id);
5000 return;
5001 }
5002
5003 ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr,
5004 NL80211_TDLS_TEARDOWN, reason,
5005 GFP_ATOMIC);
5006
5007 ath10k_dbg(ar, ATH10K_DBG_WMI,
5008 "received tdls teardown event for peer %pM reason %u\n",
5009 ev->peer_macaddr.addr, peer_reason);
5010 break;
5011 default:
5012 ath10k_dbg(ar, ATH10K_DBG_WMI,
5013 "received unknown tdls peer event %u\n",
5014 peer_status);
5015 break;
5016 }
5017 }
5018
5019 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
5020 {
5021 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
5022 }
5023
5024 void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
5025 {
5026 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
5027 }
5028
5029 void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
5030 {
5031 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
5032 }
5033
5034 void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
5035 {
5036 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
5037 }
5038
5039 void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
5040 {
5041 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
5042 }
5043
5044 void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
5045 struct sk_buff *skb)
5046 {
5047 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
5048 }
5049
5050 void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
5051 {
5052 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
5053 }
5054
5055 void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
5056 {
5057 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
5058 }
5059
5060 void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
5061 {
5062 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
5063 }
5064
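/* Allocate one DMA-coherent host memory chunk for the firmware and record
 * it in ar->wmi.mem_chunks. Returns the number of units covered by the
 * allocation, or a negative errno on failure.
 */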
5065 static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
5066 u32 num_units, u32 unit_len)
5067 {
5068 dma_addr_t paddr;
5069 u32 pool_size;
5070 int idx = ar->wmi.num_mem_chunks;
5071 void *vaddr;
5072
5073 pool_size = num_units * round_up(unit_len, 4);
5074 vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
5075
5076 if (!vaddr)
5077 return -ENOMEM;
5078
5079 ar->wmi.mem_chunks[idx].vaddr = vaddr;
5080 ar->wmi.mem_chunks[idx].paddr = paddr;
5081 ar->wmi.mem_chunks[idx].len = pool_size;
5082 ar->wmi.mem_chunks[idx].req_id = req_id;
5083 ar->wmi.num_mem_chunks++;
5084
5085 return num_units;
5086 }
5087
5088 static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
5089 u32 num_units, u32 unit_len)
5090 {
5091 int ret;
5092
5093 while (num_units) {
5094 ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len);
5095 if (ret < 0)
5096 return ret;
5097
5098 num_units -= ret;
5099 }
5100
5101 return 0;
5102 }
5103
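/* Check whether the host memory chunks allocated for a previous service
 * ready event still match the firmware's current requests (same request
 * IDs and pool sizes), so the existing allocations can be reused.
 */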
5104 static bool
5105 ath10k_wmi_is_host_mem_allocated(struct ath10k *ar,
5106 const struct wlan_host_mem_req **mem_reqs,
5107 u32 num_mem_reqs)
5108 {
5109 u32 req_id, num_units, unit_size, num_unit_info;
5110 u32 pool_size;
5111 int i, j;
5112 bool found;
5113
5114 if (ar->wmi.num_mem_chunks != num_mem_reqs)
5115 return false;
5116
5117 for (i = 0; i < num_mem_reqs; ++i) {
5118 req_id = __le32_to_cpu(mem_reqs[i]->req_id);
5119 num_units = __le32_to_cpu(mem_reqs[i]->num_units);
5120 unit_size = __le32_to_cpu(mem_reqs[i]->unit_size);
5121 num_unit_info = __le32_to_cpu(mem_reqs[i]->num_unit_info);
5122
5123 if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
5124 if (ar->num_active_peers)
5125 num_units = ar->num_active_peers + 1;
5126 else
5127 num_units = ar->max_num_peers + 1;
5128 } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
5129 num_units = ar->max_num_peers + 1;
5130 } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
5131 num_units = ar->max_num_vdevs + 1;
5132 }
5133
5134 found = false;
5135 for (j = 0; j < ar->wmi.num_mem_chunks; j++) {
5136 if (ar->wmi.mem_chunks[j].req_id == req_id) {
5137 pool_size = num_units * round_up(unit_size, 4);
5138 if (ar->wmi.mem_chunks[j].len == pool_size) {
5139 found = true;
5140 break;
5141 }
5142 }
5143 }
5144 if (!found)
5145 return false;
5146 }
5147
5148 return true;
5149 }
5150
5151 static int
5152 ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5153 struct wmi_svc_rdy_ev_arg *arg)
5154 {
5155 struct wmi_service_ready_event *ev;
5156 size_t i, n;
5157
5158 if (skb->len < sizeof(*ev))
5159 return -EPROTO;
5160
5161 ev = (void *)skb->data;
5162 skb_pull(skb, sizeof(*ev));
5163 arg->min_tx_power = ev->hw_min_tx_power;
5164 arg->max_tx_power = ev->hw_max_tx_power;
5165 arg->ht_cap = ev->ht_cap_info;
5166 arg->vht_cap = ev->vht_cap_info;
5167 arg->sw_ver0 = ev->sw_version;
5168 arg->sw_ver1 = ev->sw_version_1;
5169 arg->phy_capab = ev->phy_capability;
5170 arg->num_rf_chains = ev->num_rf_chains;
5171 arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
5172 arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
5173 arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
5174 arg->num_mem_reqs = ev->num_mem_reqs;
5175 arg->service_map = ev->wmi_service_bitmap;
5176 arg->service_map_len = sizeof(ev->wmi_service_bitmap);
5177
5178 n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
5179 ARRAY_SIZE(arg->mem_reqs));
5180 for (i = 0; i < n; i++)
5181 arg->mem_reqs[i] = &ev->mem_reqs[i];
5182
5183 if (skb->len <
5184 __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
5185 return -EPROTO;
5186
5187 return 0;
5188 }
5189
5190 static int
5191 ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5192 struct wmi_svc_rdy_ev_arg *arg)
5193 {
5194 struct wmi_10x_service_ready_event *ev;
5195 int i, n;
5196
5197 if (skb->len < sizeof(*ev))
5198 return -EPROTO;
5199
5200 ev = (void *)skb->data;
5201 skb_pull(skb, sizeof(*ev));
5202 arg->min_tx_power = ev->hw_min_tx_power;
5203 arg->max_tx_power = ev->hw_max_tx_power;
5204 arg->ht_cap = ev->ht_cap_info;
5205 arg->vht_cap = ev->vht_cap_info;
5206 arg->sw_ver0 = ev->sw_version;
5207 arg->phy_capab = ev->phy_capability;
5208 arg->num_rf_chains = ev->num_rf_chains;
5209 arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
5210 arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
5211 arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
5212 arg->num_mem_reqs = ev->num_mem_reqs;
5213 arg->service_map = ev->wmi_service_bitmap;
5214 arg->service_map_len = sizeof(ev->wmi_service_bitmap);
5215
5216 n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
5217 ARRAY_SIZE(arg->mem_reqs));
5218 for (i = 0; i < n; i++)
5219 arg->mem_reqs[i] = &ev->mem_reqs[i];
5220
5221 if (skb->len <
5222 __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
5223 return -EPROTO;
5224
5225 return 0;
5226 }
5227
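/* Service ready handling is deferred to a worker so that the host memory
 * chunks requested by firmware can be allocated with GFP_KERNEL outside
 * the WMI RX path.
 */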
5228 static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
5229 {
5230 struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
5231 struct sk_buff *skb = ar->svc_rdy_skb;
5232 struct wmi_svc_rdy_ev_arg arg = {};
5233 u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
5234 int ret;
5235 bool allocated;
5236
5237 if (!skb) {
5238 ath10k_warn(ar, "invalid service ready event skb\n");
5239 return;
5240 }
5241
5242 ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
5243 if (ret) {
5244 ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
5245 return;
5246 }
5247
5248 ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
5249 arg.service_map_len);
5250
5251 ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
5252 ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
5253 ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
5254 ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
5255 ar->fw_version_major =
5256 (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
5257 ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
5258 ar->fw_version_release =
5259 (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
5260 ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
5261 ar->phy_capability = __le32_to_cpu(arg.phy_capab);
5262 ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
5263 ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd);
5264 ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan);
5265 ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan);
5266
5267 ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
5268 arg.service_map, arg.service_map_len);
5269
5270 if (ar->num_rf_chains > ar->max_spatial_stream) {
5271 ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
5272 ar->num_rf_chains, ar->max_spatial_stream);
5273 ar->num_rf_chains = ar->max_spatial_stream;
5274 }
5275
5276 if (!ar->cfg_tx_chainmask) {
5277 ar->cfg_tx_chainmask = (1 << ar->num_rf_chains) - 1;
5278 ar->cfg_rx_chainmask = (1 << ar->num_rf_chains) - 1;
5279 }
5280
5281 if (strlen(ar->hw->wiphy->fw_version) == 0) {
5282 snprintf(ar->hw->wiphy->fw_version,
5283 sizeof(ar->hw->wiphy->fw_version),
5284 "%u.%u.%u.%u",
5285 ar->fw_version_major,
5286 ar->fw_version_minor,
5287 ar->fw_version_release,
5288 ar->fw_version_build);
5289 }
5290
5291 num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
5292 if (num_mem_reqs > WMI_MAX_MEM_REQS) {
5293 ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
5294 num_mem_reqs);
5295 return;
5296 }
5297
5298 if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
5299 if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
5300 ar->running_fw->fw_file.fw_features))
5301 ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
5302 ar->max_num_vdevs;
5303 else
5304 ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
5305 ar->max_num_vdevs;
5306
5307 ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
5308 ar->max_num_vdevs;
5309 ar->num_tids = ar->num_active_peers * 2;
5310 ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
5311 }
5312
5313 /* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
5314 * and WMI_SERVICE_IRAM_TIDS, etc.
5315 */
5316
5317 allocated = ath10k_wmi_is_host_mem_allocated(ar, arg.mem_reqs,
5318 num_mem_reqs);
5319 if (allocated)
5320 goto skip_mem_alloc;
5321
5322 /* Either this event is received during boot time or there is a change
5323 * in memory requirement from firmware when compared to last request.
5324 * Free any old memory and do a fresh allocation based on the current
5325 * memory requirement.
5326 */
5327 ath10k_wmi_free_host_mem(ar);
5328
5329 for (i = 0; i < num_mem_reqs; ++i) {
5330 req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
5331 num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
5332 unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
5333 num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
5334
5335 if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
5336 if (ar->num_active_peers)
5337 num_units = ar->num_active_peers + 1;
5338 else
5339 num_units = ar->max_num_peers + 1;
5340 } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
5341 			/* The number of units to allocate is the number of
5342 			 * peers plus 1 extra for the self peer on the target.
5343 			 * This needs to stay in step with the target; host and
5344 			 * target can otherwise get out of sync.
5345 			 */
5346 num_units = ar->max_num_peers + 1;
5347 } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
5348 num_units = ar->max_num_vdevs + 1;
5349 }
5350
5351 ath10k_dbg(ar, ATH10K_DBG_WMI,
5352 "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
5353 req_id,
5354 __le32_to_cpu(arg.mem_reqs[i]->num_units),
5355 num_unit_info,
5356 unit_size,
5357 num_units);
5358
5359 ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
5360 unit_size);
5361 if (ret)
5362 return;
5363 }
5364
5365 skip_mem_alloc:
5366 ath10k_dbg(ar, ATH10K_DBG_WMI,
5367 "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
5368 __le32_to_cpu(arg.min_tx_power),
5369 __le32_to_cpu(arg.max_tx_power),
5370 __le32_to_cpu(arg.ht_cap),
5371 __le32_to_cpu(arg.vht_cap),
5372 __le32_to_cpu(arg.sw_ver0),
5373 __le32_to_cpu(arg.sw_ver1),
5374 __le32_to_cpu(arg.fw_build),
5375 __le32_to_cpu(arg.phy_capab),
5376 __le32_to_cpu(arg.num_rf_chains),
5377 __le32_to_cpu(arg.eeprom_rd),
5378 __le32_to_cpu(arg.num_mem_reqs));
5379
5380 dev_kfree_skb(skb);
5381 ar->svc_rdy_skb = NULL;
5382 complete(&ar->wmi.service_ready);
5383 }
5384
5385 void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
5386 {
5387 ar->svc_rdy_skb = skb;
5388 queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
5389 }
5390
5391 static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5392 struct wmi_rdy_ev_arg *arg)
5393 {
5394 struct wmi_ready_event *ev = (void *)skb->data;
5395
5396 if (skb->len < sizeof(*ev))
5397 return -EPROTO;
5398
5399 skb_pull(skb, sizeof(*ev));
5400 arg->sw_version = ev->sw_version;
5401 arg->abi_version = ev->abi_version;
5402 arg->status = ev->status;
5403 arg->mac_addr = ev->mac_addr.addr;
5404
5405 return 0;
5406 }
5407
5408 static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
5409 struct wmi_roam_ev_arg *arg)
5410 {
5411 struct wmi_roam_ev *ev = (void *)skb->data;
5412
5413 if (skb->len < sizeof(*ev))
5414 return -EPROTO;
5415
5416 skb_pull(skb, sizeof(*ev));
5417 arg->vdev_id = ev->vdev_id;
5418 arg->reason = ev->reason;
5419
5420 return 0;
5421 }
5422
5423 static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
5424 struct sk_buff *skb,
5425 struct wmi_echo_ev_arg *arg)
5426 {
5427 struct wmi_echo_event *ev = (void *)skb->data;
5428
5429 arg->value = ev->value;
5430
5431 return 0;
5432 }
5433
5434 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
5435 {
5436 struct wmi_rdy_ev_arg arg = {};
5437 int ret;
5438
5439 ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
5440 if (ret) {
5441 ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
5442 return ret;
5443 }
5444
5445 ath10k_dbg(ar, ATH10K_DBG_WMI,
5446 "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
5447 __le32_to_cpu(arg.sw_version),
5448 __le32_to_cpu(arg.abi_version),
5449 arg.mac_addr,
5450 __le32_to_cpu(arg.status));
5451
5452 ether_addr_copy(ar->mac_addr, arg.mac_addr);
5453 complete(&ar->wmi.unified_ready);
5454 return 0;
5455 }
5456
5457 void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
5458 {
5459 int ret;
5460 struct wmi_svc_avail_ev_arg arg = {};
5461
5462 ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg);
5463 if (ret) {
5464 ath10k_warn(ar, "failed to parse service available event: %d\n",
5465 ret);
5466 }
5467
5468 ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
5469 __le32_to_cpu(arg.service_map_ext_len));
5470 }
5471
5472 static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
5473 {
5474 const struct wmi_pdev_temperature_event *ev;
5475
5476 ev = (struct wmi_pdev_temperature_event *)skb->data;
5477 if (WARN_ON(skb->len < sizeof(*ev)))
5478 return -EPROTO;
5479
5480 ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
5481 return 0;
5482 }
5483
5484 static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar,
5485 struct sk_buff *skb)
5486 {
5487 struct wmi_pdev_bss_chan_info_event *ev;
5488 struct survey_info *survey;
5489 u64 busy, total, tx, rx, rx_bss;
5490 u32 freq, noise_floor;
5491 u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz;
5492 int idx;
5493
5494 ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
5495 if (WARN_ON(skb->len < sizeof(*ev)))
5496 return -EPROTO;
5497
5498 freq = __le32_to_cpu(ev->freq);
5499 noise_floor = __le32_to_cpu(ev->noise_floor);
5500 busy = __le64_to_cpu(ev->cycle_busy);
5501 total = __le64_to_cpu(ev->cycle_total);
5502 tx = __le64_to_cpu(ev->cycle_tx);
5503 rx = __le64_to_cpu(ev->cycle_rx);
5504 rx_bss = __le64_to_cpu(ev->cycle_rx_bss);
5505
5506 ath10k_dbg(ar, ATH10K_DBG_WMI,
5507 "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
5508 freq, noise_floor, busy, total, tx, rx, rx_bss);
5509
5510 spin_lock_bh(&ar->data_lock);
5511 idx = freq_to_idx(ar, freq);
5512 if (idx >= ARRAY_SIZE(ar->survey)) {
5513 ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
5514 freq, idx);
5515 goto exit;
5516 }
5517
5518 survey = &ar->survey[idx];
5519
5520 survey->noise = noise_floor;
5521 survey->time = div_u64(total, cc_freq_hz);
5522 survey->time_busy = div_u64(busy, cc_freq_hz);
5523 survey->time_rx = div_u64(rx_bss, cc_freq_hz);
5524 survey->time_tx = div_u64(tx, cc_freq_hz);
5525 survey->filled |= (SURVEY_INFO_NOISE_DBM |
5526 SURVEY_INFO_TIME |
5527 SURVEY_INFO_TIME_BUSY |
5528 SURVEY_INFO_TIME_RX |
5529 SURVEY_INFO_TIME_TX);
5530 exit:
5531 spin_unlock_bh(&ar->data_lock);
5532 complete(&ar->bss_survey_done);
5533 return 0;
5534 }
5535
5536 static inline void ath10k_wmi_queue_set_coverage_class_work(struct ath10k *ar)
5537 {
5538 if (ar->hw_params.hw_ops->set_coverage_class) {
5539 spin_lock_bh(&ar->data_lock);
5540
5541 /* This call only ensures that the modified coverage class
5542 * persists in case the firmware sets the registers back to
5543 * their default value. So calling it is only necessary if the
5544 * coverage class has a non-zero value.
5545 */
5546 if (ar->fw_coverage.coverage_class)
5547 queue_work(ar->workqueue, &ar->set_coverage_class_work);
5548
5549 spin_unlock_bh(&ar->data_lock);
5550 }
5551 }
5552
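/* Main WMI event dispatcher: strip the WMI command header and route the
 * event by ID. The skb is freed here unless the handler takes ownership
 * of it (management RX and service ready events).
 */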
5553 static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
5554 {
5555 struct wmi_cmd_hdr *cmd_hdr;
5556 enum wmi_event_id id;
5557
5558 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
5559 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
5560
5561 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
5562 goto out;
5563
5564 trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
5565
5566 switch (id) {
5567 case WMI_MGMT_RX_EVENTID:
5568 ath10k_wmi_event_mgmt_rx(ar, skb);
5569 /* mgmt_rx() owns the skb now! */
5570 return;
5571 case WMI_SCAN_EVENTID:
5572 ath10k_wmi_event_scan(ar, skb);
5573 ath10k_wmi_queue_set_coverage_class_work(ar);
5574 break;
5575 case WMI_CHAN_INFO_EVENTID:
5576 ath10k_wmi_event_chan_info(ar, skb);
5577 break;
5578 case WMI_ECHO_EVENTID:
5579 ath10k_wmi_event_echo(ar, skb);
5580 break;
5581 case WMI_DEBUG_MESG_EVENTID:
5582 ath10k_wmi_event_debug_mesg(ar, skb);
5583 ath10k_wmi_queue_set_coverage_class_work(ar);
5584 break;
5585 case WMI_UPDATE_STATS_EVENTID:
5586 ath10k_wmi_event_update_stats(ar, skb);
5587 break;
5588 case WMI_VDEV_START_RESP_EVENTID:
5589 ath10k_wmi_event_vdev_start_resp(ar, skb);
5590 ath10k_wmi_queue_set_coverage_class_work(ar);
5591 break;
5592 case WMI_VDEV_STOPPED_EVENTID:
5593 ath10k_wmi_event_vdev_stopped(ar, skb);
5594 ath10k_wmi_queue_set_coverage_class_work(ar);
5595 break;
5596 case WMI_PEER_STA_KICKOUT_EVENTID:
5597 ath10k_wmi_event_peer_sta_kickout(ar, skb);
5598 break;
5599 case WMI_HOST_SWBA_EVENTID:
5600 ath10k_wmi_event_host_swba(ar, skb);
5601 break;
5602 case WMI_TBTTOFFSET_UPDATE_EVENTID:
5603 ath10k_wmi_event_tbttoffset_update(ar, skb);
5604 break;
5605 case WMI_PHYERR_EVENTID:
5606 ath10k_wmi_event_phyerr(ar, skb);
5607 break;
5608 case WMI_ROAM_EVENTID:
5609 ath10k_wmi_event_roam(ar, skb);
5610 ath10k_wmi_queue_set_coverage_class_work(ar);
5611 break;
5612 case WMI_PROFILE_MATCH:
5613 ath10k_wmi_event_profile_match(ar, skb);
5614 break;
5615 case WMI_DEBUG_PRINT_EVENTID:
5616 ath10k_wmi_event_debug_print(ar, skb);
5617 ath10k_wmi_queue_set_coverage_class_work(ar);
5618 break;
5619 case WMI_PDEV_QVIT_EVENTID:
5620 ath10k_wmi_event_pdev_qvit(ar, skb);
5621 break;
5622 case WMI_WLAN_PROFILE_DATA_EVENTID:
5623 ath10k_wmi_event_wlan_profile_data(ar, skb);
5624 break;
5625 case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
5626 ath10k_wmi_event_rtt_measurement_report(ar, skb);
5627 break;
5628 case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
5629 ath10k_wmi_event_tsf_measurement_report(ar, skb);
5630 break;
5631 case WMI_RTT_ERROR_REPORT_EVENTID:
5632 ath10k_wmi_event_rtt_error_report(ar, skb);
5633 break;
5634 case WMI_WOW_WAKEUP_HOST_EVENTID:
5635 ath10k_wmi_event_wow_wakeup_host(ar, skb);
5636 break;
5637 case WMI_DCS_INTERFERENCE_EVENTID:
5638 ath10k_wmi_event_dcs_interference(ar, skb);
5639 break;
5640 case WMI_PDEV_TPC_CONFIG_EVENTID:
5641 ath10k_wmi_event_pdev_tpc_config(ar, skb);
5642 break;
5643 case WMI_PDEV_FTM_INTG_EVENTID:
5644 ath10k_wmi_event_pdev_ftm_intg(ar, skb);
5645 break;
5646 case WMI_GTK_OFFLOAD_STATUS_EVENTID:
5647 ath10k_wmi_event_gtk_offload_status(ar, skb);
5648 break;
5649 case WMI_GTK_REKEY_FAIL_EVENTID:
5650 ath10k_wmi_event_gtk_rekey_fail(ar, skb);
5651 break;
5652 case WMI_TX_DELBA_COMPLETE_EVENTID:
5653 ath10k_wmi_event_delba_complete(ar, skb);
5654 break;
5655 case WMI_TX_ADDBA_COMPLETE_EVENTID:
5656 ath10k_wmi_event_addba_complete(ar, skb);
5657 break;
5658 case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
5659 ath10k_wmi_event_vdev_install_key_complete(ar, skb);
5660 break;
5661 case WMI_SERVICE_READY_EVENTID:
5662 ath10k_wmi_event_service_ready(ar, skb);
5663 return;
5664 case WMI_READY_EVENTID:
5665 ath10k_wmi_event_ready(ar, skb);
5666 ath10k_wmi_queue_set_coverage_class_work(ar);
5667 break;
5668 case WMI_SERVICE_AVAILABLE_EVENTID:
5669 ath10k_wmi_event_service_available(ar, skb);
5670 break;
5671 default:
5672 ath10k_warn(ar, "Unknown eventid: %d\n", id);
5673 break;
5674 }
5675
5676 out:
5677 dev_kfree_skb(skb);
5678 }
5679
5680 static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
5681 {
5682 struct wmi_cmd_hdr *cmd_hdr;
5683 enum wmi_10x_event_id id;
5684 bool consumed;
5685
5686 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
5687 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
5688
5689 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
5690 goto out;
5691
5692 trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
5693
5694 consumed = ath10k_tm_event_wmi(ar, id, skb);
5695
5696 	/* The ready event must be handled normally even in UTF mode so that
5697 	 * we know the UTF firmware has booted; all other WMI events are just
5698 	 * passed through to testmode.
5699 	 */
5700 if (consumed && id != WMI_10X_READY_EVENTID) {
5701 ath10k_dbg(ar, ATH10K_DBG_WMI,
5702 "wmi testmode consumed 0x%x\n", id);
5703 goto out;
5704 }
5705
5706 switch (id) {
5707 case WMI_10X_MGMT_RX_EVENTID:
5708 ath10k_wmi_event_mgmt_rx(ar, skb);
5709 /* mgmt_rx() owns the skb now! */
5710 return;
5711 case WMI_10X_SCAN_EVENTID:
5712 ath10k_wmi_event_scan(ar, skb);
5713 ath10k_wmi_queue_set_coverage_class_work(ar);
5714 break;
5715 case WMI_10X_CHAN_INFO_EVENTID:
5716 ath10k_wmi_event_chan_info(ar, skb);
5717 break;
5718 case WMI_10X_ECHO_EVENTID:
5719 ath10k_wmi_event_echo(ar, skb);
5720 break;
5721 case WMI_10X_DEBUG_MESG_EVENTID:
5722 ath10k_wmi_event_debug_mesg(ar, skb);
5723 ath10k_wmi_queue_set_coverage_class_work(ar);
5724 break;
5725 case WMI_10X_UPDATE_STATS_EVENTID:
5726 ath10k_wmi_event_update_stats(ar, skb);
5727 break;
5728 case WMI_10X_VDEV_START_RESP_EVENTID:
5729 ath10k_wmi_event_vdev_start_resp(ar, skb);
5730 ath10k_wmi_queue_set_coverage_class_work(ar);
5731 break;
5732 case WMI_10X_VDEV_STOPPED_EVENTID:
5733 ath10k_wmi_event_vdev_stopped(ar, skb);
5734 ath10k_wmi_queue_set_coverage_class_work(ar);
5735 break;
5736 case WMI_10X_PEER_STA_KICKOUT_EVENTID:
5737 ath10k_wmi_event_peer_sta_kickout(ar, skb);
5738 break;
5739 case WMI_10X_HOST_SWBA_EVENTID:
5740 ath10k_wmi_event_host_swba(ar, skb);
5741 break;
5742 case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
5743 ath10k_wmi_event_tbttoffset_update(ar, skb);
5744 break;
5745 case WMI_10X_PHYERR_EVENTID:
5746 ath10k_wmi_event_phyerr(ar, skb);
5747 break;
5748 case WMI_10X_ROAM_EVENTID:
5749 ath10k_wmi_event_roam(ar, skb);
5750 ath10k_wmi_queue_set_coverage_class_work(ar);
5751 break;
5752 case WMI_10X_PROFILE_MATCH:
5753 ath10k_wmi_event_profile_match(ar, skb);
5754 break;
5755 case WMI_10X_DEBUG_PRINT_EVENTID:
5756 ath10k_wmi_event_debug_print(ar, skb);
5757 ath10k_wmi_queue_set_coverage_class_work(ar);
5758 break;
5759 case WMI_10X_PDEV_QVIT_EVENTID:
5760 ath10k_wmi_event_pdev_qvit(ar, skb);
5761 break;
5762 case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
5763 ath10k_wmi_event_wlan_profile_data(ar, skb);
5764 break;
5765 case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
5766 ath10k_wmi_event_rtt_measurement_report(ar, skb);
5767 break;
5768 case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
5769 ath10k_wmi_event_tsf_measurement_report(ar, skb);
5770 break;
5771 case WMI_10X_RTT_ERROR_REPORT_EVENTID:
5772 ath10k_wmi_event_rtt_error_report(ar, skb);
5773 break;
5774 case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
5775 ath10k_wmi_event_wow_wakeup_host(ar, skb);
5776 break;
5777 case WMI_10X_DCS_INTERFERENCE_EVENTID:
5778 ath10k_wmi_event_dcs_interference(ar, skb);
5779 break;
5780 case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
5781 ath10k_wmi_event_pdev_tpc_config(ar, skb);
5782 break;
5783 case WMI_10X_INST_RSSI_STATS_EVENTID:
5784 ath10k_wmi_event_inst_rssi_stats(ar, skb);
5785 break;
5786 case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
5787 ath10k_wmi_event_vdev_standby_req(ar, skb);
5788 break;
5789 case WMI_10X_VDEV_RESUME_REQ_EVENTID:
5790 ath10k_wmi_event_vdev_resume_req(ar, skb);
5791 break;
5792 case WMI_10X_SERVICE_READY_EVENTID:
5793 ath10k_wmi_event_service_ready(ar, skb);
5794 return;
5795 case WMI_10X_READY_EVENTID:
5796 ath10k_wmi_event_ready(ar, skb);
5797 ath10k_wmi_queue_set_coverage_class_work(ar);
5798 break;
5799 case WMI_10X_PDEV_UTF_EVENTID:
5800 /* ignore utf events */
5801 break;
5802 default:
5803 ath10k_warn(ar, "Unknown eventid: %d\n", id);
5804 break;
5805 }
5806
5807 out:
5808 dev_kfree_skb(skb);
5809 }
5810
5811 static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
5812 {
5813 struct wmi_cmd_hdr *cmd_hdr;
5814 enum wmi_10_2_event_id id;
5815 bool consumed;
5816
5817 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
5818 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
5819
5820 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
5821 goto out;
5822
5823 trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
5824
5825 consumed = ath10k_tm_event_wmi(ar, id, skb);
5826
5827 	/* The ready event must be handled normally even in UTF mode so that
5828 	 * we know the UTF firmware has booted; all other WMI events are just
5829 	 * passed through to testmode.
5830 	 */
5831 if (consumed && id != WMI_10_2_READY_EVENTID) {
5832 ath10k_dbg(ar, ATH10K_DBG_WMI,
5833 "wmi testmode consumed 0x%x\n", id);
5834 goto out;
5835 }
5836
5837 switch (id) {
5838 case WMI_10_2_MGMT_RX_EVENTID:
5839 ath10k_wmi_event_mgmt_rx(ar, skb);
5840 /* mgmt_rx() owns the skb now! */
5841 return;
5842 case WMI_10_2_SCAN_EVENTID:
5843 ath10k_wmi_event_scan(ar, skb);
5844 ath10k_wmi_queue_set_coverage_class_work(ar);
5845 break;
5846 case WMI_10_2_CHAN_INFO_EVENTID:
5847 ath10k_wmi_event_chan_info(ar, skb);
5848 break;
5849 case WMI_10_2_ECHO_EVENTID:
5850 ath10k_wmi_event_echo(ar, skb);
5851 break;
5852 case WMI_10_2_DEBUG_MESG_EVENTID:
5853 ath10k_wmi_event_debug_mesg(ar, skb);
5854 ath10k_wmi_queue_set_coverage_class_work(ar);
5855 break;
5856 case WMI_10_2_UPDATE_STATS_EVENTID:
5857 ath10k_wmi_event_update_stats(ar, skb);
5858 break;
5859 case WMI_10_2_VDEV_START_RESP_EVENTID:
5860 ath10k_wmi_event_vdev_start_resp(ar, skb);
5861 ath10k_wmi_queue_set_coverage_class_work(ar);
5862 break;
5863 case WMI_10_2_VDEV_STOPPED_EVENTID:
5864 ath10k_wmi_event_vdev_stopped(ar, skb);
5865 ath10k_wmi_queue_set_coverage_class_work(ar);
5866 break;
5867 case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
5868 ath10k_wmi_event_peer_sta_kickout(ar, skb);
5869 break;
5870 case WMI_10_2_HOST_SWBA_EVENTID:
5871 ath10k_wmi_event_host_swba(ar, skb);
5872 break;
5873 case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
5874 ath10k_wmi_event_tbttoffset_update(ar, skb);
5875 break;
5876 case WMI_10_2_PHYERR_EVENTID:
5877 ath10k_wmi_event_phyerr(ar, skb);
5878 break;
5879 case WMI_10_2_ROAM_EVENTID:
5880 ath10k_wmi_event_roam(ar, skb);
5881 ath10k_wmi_queue_set_coverage_class_work(ar);
5882 break;
5883 case WMI_10_2_PROFILE_MATCH:
5884 ath10k_wmi_event_profile_match(ar, skb);
5885 break;
5886 case WMI_10_2_DEBUG_PRINT_EVENTID:
5887 ath10k_wmi_event_debug_print(ar, skb);
5888 ath10k_wmi_queue_set_coverage_class_work(ar);
5889 break;
5890 case WMI_10_2_PDEV_QVIT_EVENTID:
5891 ath10k_wmi_event_pdev_qvit(ar, skb);
5892 break;
5893 case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
5894 ath10k_wmi_event_wlan_profile_data(ar, skb);
5895 break;
5896 case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
5897 ath10k_wmi_event_rtt_measurement_report(ar, skb);
5898 break;
5899 case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
5900 ath10k_wmi_event_tsf_measurement_report(ar, skb);
5901 break;
5902 case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
5903 ath10k_wmi_event_rtt_error_report(ar, skb);
5904 break;
5905 case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
5906 ath10k_wmi_event_wow_wakeup_host(ar, skb);
5907 break;
5908 case WMI_10_2_DCS_INTERFERENCE_EVENTID:
5909 ath10k_wmi_event_dcs_interference(ar, skb);
5910 break;
5911 case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
5912 ath10k_wmi_event_pdev_tpc_config(ar, skb);
5913 break;
5914 case WMI_10_2_INST_RSSI_STATS_EVENTID:
5915 ath10k_wmi_event_inst_rssi_stats(ar, skb);
5916 break;
5917 case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
5918 ath10k_wmi_event_vdev_standby_req(ar, skb);
5919 ath10k_wmi_queue_set_coverage_class_work(ar);
5920 break;
5921 case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
5922 ath10k_wmi_event_vdev_resume_req(ar, skb);
5923 ath10k_wmi_queue_set_coverage_class_work(ar);
5924 break;
5925 case WMI_10_2_SERVICE_READY_EVENTID:
5926 ath10k_wmi_event_service_ready(ar, skb);
5927 return;
5928 case WMI_10_2_READY_EVENTID:
5929 ath10k_wmi_event_ready(ar, skb);
5930 ath10k_wmi_queue_set_coverage_class_work(ar);
5931 break;
5932 case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
5933 ath10k_wmi_event_temperature(ar, skb);
5934 break;
5935 case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID:
5936 ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
5937 break;
5938 case WMI_10_2_RTT_KEEPALIVE_EVENTID:
5939 case WMI_10_2_GPIO_INPUT_EVENTID:
5940 case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
5941 case WMI_10_2_GENERIC_BUFFER_EVENTID:
5942 case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
5943 case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
5944 case WMI_10_2_WDS_PEER_EVENTID:
5945 ath10k_dbg(ar, ATH10K_DBG_WMI,
5946 "received event id %d not implemented\n", id);
5947 break;
5948 default:
5949 ath10k_warn(ar, "Unknown eventid: %d\n", id);
5950 break;
5951 }
5952
5953 out:
5954 dev_kfree_skb(skb);
5955 }
5956
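/* Dispatch a WMI event from the 10.4 firmware branch. The flow mirrors the
 * 10.2 handler above; several 10.4-only events are acknowledged in debug
 * logs but intentionally left unimplemented.
 */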
5957 static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
5958 {
5959 struct wmi_cmd_hdr *cmd_hdr;
5960 enum wmi_10_4_event_id id;
5961 bool consumed;
5962
5963 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
5964 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
5965
5966 if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
5967 goto out;
5968
5969 trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
5970
5971 consumed = ath10k_tm_event_wmi(ar, id, skb);
5972
5973 /* The ready event must be handled normally even in UTF mode so that we
5974 * know the UTF firmware has booted; all other WMI events are simply
5975 * passed through to testmode.
5976 */
5977 if (consumed && id != WMI_10_4_READY_EVENTID) {
5978 ath10k_dbg(ar, ATH10K_DBG_WMI,
5979 "wmi testmode consumed 0x%x\n", id);
5980 goto out;
5981 }
5982
5983 switch (id) {
5984 case WMI_10_4_MGMT_RX_EVENTID:
5985 ath10k_wmi_event_mgmt_rx(ar, skb);
5986 /* mgmt_rx() owns the skb now! */
5987 return;
5988 case WMI_10_4_ECHO_EVENTID:
5989 ath10k_wmi_event_echo(ar, skb);
5990 break;
5991 case WMI_10_4_DEBUG_MESG_EVENTID:
5992 ath10k_wmi_event_debug_mesg(ar, skb);
5993 ath10k_wmi_queue_set_coverage_class_work(ar);
5994 break;
5995 case WMI_10_4_SERVICE_READY_EVENTID:
5996 ath10k_wmi_event_service_ready(ar, skb);
5997 return;
5998 case WMI_10_4_SCAN_EVENTID:
5999 ath10k_wmi_event_scan(ar, skb);
6000 ath10k_wmi_queue_set_coverage_class_work(ar);
6001 break;
6002 case WMI_10_4_CHAN_INFO_EVENTID:
6003 ath10k_wmi_event_chan_info(ar, skb);
6004 break;
6005 case WMI_10_4_PHYERR_EVENTID:
6006 ath10k_wmi_event_phyerr(ar, skb);
6007 break;
6008 case WMI_10_4_READY_EVENTID:
6009 ath10k_wmi_event_ready(ar, skb);
6010 ath10k_wmi_queue_set_coverage_class_work(ar);
6011 break;
6012 case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
6013 ath10k_wmi_event_peer_sta_kickout(ar, skb);
6014 break;
6015 case WMI_10_4_ROAM_EVENTID:
6016 ath10k_wmi_event_roam(ar, skb);
6017 ath10k_wmi_queue_set_coverage_class_work(ar);
6018 break;
6019 case WMI_10_4_HOST_SWBA_EVENTID:
6020 ath10k_wmi_event_host_swba(ar, skb);
6021 break;
6022 case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
6023 ath10k_wmi_event_tbttoffset_update(ar, skb);
6024 break;
6025 case WMI_10_4_DEBUG_PRINT_EVENTID:
6026 ath10k_wmi_event_debug_print(ar, skb);
6027 ath10k_wmi_queue_set_coverage_class_work(ar);
6028 break;
6029 case WMI_10_4_VDEV_START_RESP_EVENTID:
6030 ath10k_wmi_event_vdev_start_resp(ar, skb);
6031 ath10k_wmi_queue_set_coverage_class_work(ar);
6032 break;
6033 case WMI_10_4_VDEV_STOPPED_EVENTID:
6034 ath10k_wmi_event_vdev_stopped(ar, skb);
6035 ath10k_wmi_queue_set_coverage_class_work(ar);
6036 break;
6037 case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
6038 case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
6039 case WMI_10_4_WDS_PEER_EVENTID:
6040 case WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID:
6041 ath10k_dbg(ar, ATH10K_DBG_WMI,
6042 "received event id %d not implemented\n", id);
6043 break;
6044 case WMI_10_4_UPDATE_STATS_EVENTID:
6045 ath10k_wmi_event_update_stats(ar, skb);
6046 break;
6047 case WMI_10_4_PDEV_TEMPERATURE_EVENTID:
6048 ath10k_wmi_event_temperature(ar, skb);
6049 break;
6050 case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
6051 ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
6052 break;
6053 case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
6054 ath10k_wmi_event_pdev_tpc_config(ar, skb);
6055 break;
6056 case WMI_10_4_TDLS_PEER_EVENTID:
6057 ath10k_wmi_handle_tdls_peer_event(ar, skb);
6058 break;
6059 case WMI_10_4_PDEV_TPC_TABLE_EVENTID:
6060 ath10k_wmi_event_tpc_final_table(ar, skb);
6061 break;
6062 case WMI_10_4_DFS_STATUS_CHECK_EVENTID:
6063 ath10k_wmi_event_dfs_status_check(ar, skb);
6064 break;
6065 default:
6066 ath10k_warn(ar, "Unknown eventid: %d\n", id);
6067 break;
6068 }
6069
6070 out:
6071 dev_kfree_skb(skb);
6072 }
6073
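/* HTC rx-completion callback for the WMI control endpoint: forwards the
 * received skb to the firmware-specific rx op via ath10k_wmi_rx().
 */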
6074 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
6075 {
6076 int ret;
6077
6078 ret = ath10k_wmi_rx(ar, skb);
6079 if (ret)
6080 ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
6081 }
6082
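/* Connect the WMI control service over HTC and remember the endpoint id
 * that all subsequent WMI commands are transmitted on.
 */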
6083 int ath10k_wmi_connect(struct ath10k *ar)
6084 {
6085 int status;
6086 struct ath10k_htc_svc_conn_req conn_req;
6087 struct ath10k_htc_svc_conn_resp conn_resp;
6088
6089 memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
6090
6091 memset(&conn_req, 0, sizeof(conn_req));
6092 memset(&conn_resp, 0, sizeof(conn_resp));
6093
6094 /* these fields are the same for all service endpoints */
6095 conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
6096 conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
6097 conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
6098
6099 /* connect to control service */
6100 conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
6101
6102 status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
6103 if (status) {
6104 ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
6105 status);
6106 return status;
6107 }
6108
6109 ar->wmi.eid = conn_resp.eid;
6110 return 0;
6111 }
6112
6113 static struct sk_buff *
6114 ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
6115 u16 ctl2g, u16 ctl5g,
6116 enum wmi_dfs_region dfs_reg)
6117 {
6118 struct wmi_pdev_set_regdomain_cmd *cmd;
6119 struct sk_buff *skb;
6120
6121 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6122 if (!skb)
6123 return ERR_PTR(-ENOMEM);
6124
6125 cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
6126 cmd->reg_domain = __cpu_to_le32(rd);
6127 cmd->reg_domain_2G = __cpu_to_le32(rd2g);
6128 cmd->reg_domain_5G = __cpu_to_le32(rd5g);
6129 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
6130 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
6131
6132 ath10k_dbg(ar, ATH10K_DBG_WMI,
6133 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
6134 rd, rd2g, rd5g, ctl2g, ctl5g);
6135 return skb;
6136 }
6137
6138 static struct sk_buff *
6139 ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
6140 rd5g, u16 ctl2g, u16 ctl5g,
6141 enum wmi_dfs_region dfs_reg)
6142 {
6143 struct wmi_pdev_set_regdomain_cmd_10x *cmd;
6144 struct sk_buff *skb;
6145
6146 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6147 if (!skb)
6148 return ERR_PTR(-ENOMEM);
6149
6150 cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
6151 cmd->reg_domain = __cpu_to_le32(rd);
6152 cmd->reg_domain_2G = __cpu_to_le32(rd2g);
6153 cmd->reg_domain_5G = __cpu_to_le32(rd5g);
6154 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
6155 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
6156 cmd->dfs_domain = __cpu_to_le32(dfs_reg);
6157
6158 ath10k_dbg(ar, ATH10K_DBG_WMI,
6159 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
6160 rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
6161 return skb;
6162 }
6163
6164 static struct sk_buff *
6165 ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
6166 {
6167 struct wmi_pdev_suspend_cmd *cmd;
6168 struct sk_buff *skb;
6169
6170 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6171 if (!skb)
6172 return ERR_PTR(-ENOMEM);
6173
6174 cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
6175 cmd->suspend_opt = __cpu_to_le32(suspend_opt);
6176
6177 return skb;
6178 }
6179
6180 static struct sk_buff *
6181 ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
6182 {
6183 struct sk_buff *skb;
6184
6185 skb = ath10k_wmi_alloc_skb(ar, 0);
6186 if (!skb)
6187 return ERR_PTR(-ENOMEM);
6188
6189 return skb;
6190 }
6191
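/* Build a pdev set-param command. Parameters that the current firmware
 * branch does not map (WMI_PDEV_PARAM_UNSUPPORTED) are rejected with
 * -EOPNOTSUPP before any buffer is allocated.
 */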
6192 static struct sk_buff *
6193 ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
6194 {
6195 struct wmi_pdev_set_param_cmd *cmd;
6196 struct sk_buff *skb;
6197
6198 if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
6199 ath10k_warn(ar, "pdev param %d not supported by firmware\n",
6200 id);
6201 return ERR_PTR(-EOPNOTSUPP);
6202 }
6203
6204 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6205 if (!skb)
6206 return ERR_PTR(-ENOMEM);
6207
6208 cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
6209 cmd->param_id = __cpu_to_le32(id);
6210 cmd->param_value = __cpu_to_le32(value);
6211
6212 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
6213 id, value);
6214 return skb;
6215 }
6216
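/* Serialize the host memory chunks (address, length, request id) into an
 * init command so the firmware can locate the memory it asked for; the
 * chunks themselves are expected to have been allocated earlier, when the
 * firmware's memory requests were processed during service ready handling.
 */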
6217 void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
6218 struct wmi_host_mem_chunks *chunks)
6219 {
6220 struct host_memory_chunk *chunk;
6221 int i;
6222
6223 chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
6224
6225 for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
6226 chunk = &chunks->items[i];
6227 chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
6228 chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
6229 chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
6230
6231 ath10k_dbg(ar, ATH10K_DBG_WMI,
6232 "wmi chunk %d len %d requested, addr 0x%llx\n",
6233 i,
6234 ar->wmi.mem_chunks[i].len,
6235 (unsigned long long)ar->wmi.mem_chunks[i].paddr);
6236 }
6237 }
6238
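/* Build the WMI init command for the main (10.0) firmware branch: a
 * compile-time resource configuration followed by the host memory chunk
 * list.
 */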
6239 static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
6240 {
6241 struct wmi_init_cmd *cmd;
6242 struct sk_buff *buf;
6243 struct wmi_resource_config config = {};
6244 u32 len, val;
6245
6246 config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
6247 config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
6248 config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
6249
6250 config.num_offload_reorder_bufs =
6251 __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
6252
6253 config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
6254 config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
6255 config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
6256 config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
6257 config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
6258 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6259 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6260 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6261 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
6262 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6263 config.scan_max_pending_reqs =
6264 __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
6265
6266 config.bmiss_offload_max_vdev =
6267 __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
6268
6269 config.roam_offload_max_vdev =
6270 __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
6271
6272 config.roam_offload_max_ap_profiles =
6273 __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
6274
6275 config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
6276 config.num_mcast_table_elems =
6277 __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
6278
6279 config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
6280 config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
6281 config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
6282 config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
6283 config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
6284
6285 val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6286 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6287
6288 config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
6289
6290 config.gtk_offload_max_vdev =
6291 __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
6292
6293 config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
6294 config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
6295
6296 len = sizeof(*cmd) +
6297 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
6298
6299 buf = ath10k_wmi_alloc_skb(ar, len);
6300 if (!buf)
6301 return ERR_PTR(-ENOMEM);
6302
6303 cmd = (struct wmi_init_cmd *)buf->data;
6304
6305 memcpy(&cmd->resource_config, &config, sizeof(config));
6306 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6307
6308 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
6309 return buf;
6310 }
6311
6312 static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
6313 {
6314 struct wmi_init_cmd_10x *cmd;
6315 struct sk_buff *buf;
6316 struct wmi_resource_config_10x config = {};
6317 u32 len, val;
6318
6319 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
6320 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
6321 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
6322 config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
6323 config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
6324 config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
6325 config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
6326 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6327 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6328 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6329 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
6330 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6331 config.scan_max_pending_reqs =
6332 __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
6333
6334 config.bmiss_offload_max_vdev =
6335 __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
6336
6337 config.roam_offload_max_vdev =
6338 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
6339
6340 config.roam_offload_max_ap_profiles =
6341 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
6342
6343 config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
6344 config.num_mcast_table_elems =
6345 __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
6346
6347 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
6348 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
6349 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
6350 config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
6351 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
6352
6353 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6354 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6355
6356 config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
6357
6358 config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
6359 config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
6360
6361 len = sizeof(*cmd) +
6362 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
6363
6364 buf = ath10k_wmi_alloc_skb(ar, len);
6365 if (!buf)
6366 return ERR_PTR(-ENOMEM);
6367
6368 cmd = (struct wmi_init_cmd_10x *)buf->data;
6369
6370 memcpy(&cmd->resource_config, &config, sizeof(config));
6371 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6372
6373 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
6374 return buf;
6375 }
6376
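/* 10.2 init reuses the 10.x resource config but adds a feature mask:
 * rx batching is always requested, while coex GPIO, peer stats and BSS
 * channel info are enabled only when the matching service bits or device
 * flags are set.
 */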
6377 static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
6378 {
6379 struct wmi_init_cmd_10_2 *cmd;
6380 struct sk_buff *buf;
6381 struct wmi_resource_config_10x config = {};
6382 u32 len, val, features;
6383
6384 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
6385 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
6386
6387 if (ath10k_peer_stats_enabled(ar)) {
6388 config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
6389 config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
6390 } else {
6391 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
6392 config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
6393 }
6394
6395 config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
6396 config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
6397 config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
6398 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6399 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6400 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6401 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
6402 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6403
6404 config.scan_max_pending_reqs =
6405 __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
6406
6407 config.bmiss_offload_max_vdev =
6408 __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
6409
6410 config.roam_offload_max_vdev =
6411 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
6412
6413 config.roam_offload_max_ap_profiles =
6414 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
6415
6416 config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
6417 config.num_mcast_table_elems =
6418 __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
6419
6420 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
6421 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
6422 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
6423 config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
6424 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
6425
6426 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6427 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6428
6429 config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
6430
6431 config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
6432 config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
6433
6434 len = sizeof(*cmd) +
6435 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
6436
6437 buf = ath10k_wmi_alloc_skb(ar, len);
6438 if (!buf)
6439 return ERR_PTR(-ENOMEM);
6440
6441 cmd = (struct wmi_init_cmd_10_2 *)buf->data;
6442
6443 features = WMI_10_2_RX_BATCH_MODE;
6444
6445 if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) &&
6446 test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
6447 features |= WMI_10_2_COEX_GPIO;
6448
6449 if (ath10k_peer_stats_enabled(ar))
6450 features |= WMI_10_2_PEER_STATS;
6451
6452 if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
6453 features |= WMI_10_2_BSS_CHAN_INFO;
6454
6455 cmd->resource_config.feature_mask = __cpu_to_le32(features);
6456
6457 memcpy(&cmd->resource_config.common, &config, sizeof(config));
6458 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6459
6460 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
6461 return buf;
6462 }
6463
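/* 10.4 init sizes its vdev, peer and tid limits from values negotiated at
 * runtime (ar->max_num_vdevs, ar->max_num_peers, ...) rather than from
 * compile-time targets, and takes the chain masks from hw_params.
 */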
6464 static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
6465 {
6466 struct wmi_init_cmd_10_4 *cmd;
6467 struct sk_buff *buf;
6468 struct wmi_resource_config_10_4 config = {};
6469 u32 len;
6470
6471 config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
6472 config.num_peers = __cpu_to_le32(ar->max_num_peers);
6473 config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
6474 config.num_tids = __cpu_to_le32(ar->num_tids);
6475
6476 config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
6477 config.num_offload_reorder_buffs =
6478 __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
6479 config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
6480 config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
6481 config.tx_chain_mask = __cpu_to_le32(ar->hw_params.tx_chain_mask);
6482 config.rx_chain_mask = __cpu_to_le32(ar->hw_params.rx_chain_mask);
6483
6484 config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6485 config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6486 config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6487 config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
6488
6489 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6490 config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
6491 config.bmiss_offload_max_vdev =
6492 __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
6493 config.roam_offload_max_vdev =
6494 __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
6495 config.roam_offload_max_ap_profiles =
6496 __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
6497 config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
6498 config.num_mcast_table_elems =
6499 __cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
6500
6501 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
6502 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
6503 config.num_wds_entries = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
6504 config.dma_burst_size = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
6505 config.mac_aggr_delim = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
6506
6507 config.rx_skip_defrag_timeout_dup_detection_check =
6508 __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
6509
6510 config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
6511 config.gtk_offload_max_vdev =
6512 __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
6513 config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
6514 config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
6515 config.max_peer_ext_stats =
6516 __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
6517 config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
6518
6519 config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
6520 config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
6521 config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
6522 config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
6523
6524 config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
6525 config.tt_support =
6526 __cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
6527 config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
6528 config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
6529 config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
6530
6531 len = sizeof(*cmd) +
6532 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
6533
6534 buf = ath10k_wmi_alloc_skb(ar, len);
6535 if (!buf)
6536 return ERR_PTR(-ENOMEM);
6537
6538 cmd = (struct wmi_init_cmd_10_4 *)buf->data;
6539 memcpy(&cmd->resource_config, &config, sizeof(config));
6540 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6541
6542 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
6543 return buf;
6544 }
6545
6546 int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
6547 {
6548 if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
6549 return -EINVAL;
6550 if (arg->n_channels > ARRAY_SIZE(arg->channels))
6551 return -EINVAL;
6552 if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
6553 return -EINVAL;
6554 if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
6555 return -EINVAL;
6556
6557 return 0;
6558 }
6559
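/* Number of bytes needed for the optional start-scan TLVs (IE data,
 * channel list, SSID list, BSSID list) that follow the fixed command.
 */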
6560 static size_t
6561 ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
6562 {
6563 int len = 0;
6564
6565 if (arg->ie_len) {
6566 len += sizeof(struct wmi_ie_data);
6567 len += roundup(arg->ie_len, 4);
6568 }
6569
6570 if (arg->n_channels) {
6571 len += sizeof(struct wmi_chan_list);
6572 len += sizeof(__le32) * arg->n_channels;
6573 }
6574
6575 if (arg->n_ssids) {
6576 len += sizeof(struct wmi_ssid_list);
6577 len += sizeof(struct wmi_ssid) * arg->n_ssids;
6578 }
6579
6580 if (arg->n_bssids) {
6581 len += sizeof(struct wmi_bssid_list);
6582 len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
6583 }
6584
6585 return len;
6586 }
6587
6588 void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
6589 const struct wmi_start_scan_arg *arg)
6590 {
6591 u32 scan_id;
6592 u32 scan_req_id;
6593
6594 scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
6595 scan_id |= arg->scan_id;
6596
6597 scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
6598 scan_req_id |= arg->scan_req_id;
6599
6600 cmn->scan_id = __cpu_to_le32(scan_id);
6601 cmn->scan_req_id = __cpu_to_le32(scan_req_id);
6602 cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
6603 cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
6604 cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
6605 cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
6606 cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
6607 cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
6608 cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
6609 cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
6610 cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
6611 cmn->idle_time = __cpu_to_le32(arg->idle_time);
6612 cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
6613 cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
6614 cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
6615 }
6616
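/* Pack the optional start-scan TLVs in the order channel list, SSID list,
 * BSSID list, IE data. The destination buffer must have been sized with
 * ath10k_wmi_start_scan_tlvs_len().
 */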
6617 static void
6618 ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
6619 const struct wmi_start_scan_arg *arg)
6620 {
6621 struct wmi_ie_data *ie;
6622 struct wmi_chan_list *channels;
6623 struct wmi_ssid_list *ssids;
6624 struct wmi_bssid_list *bssids;
6625 void *ptr = tlvs->tlvs;
6626 int i;
6627
6628 if (arg->n_channels) {
6629 channels = ptr;
6630 channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
6631 channels->num_chan = __cpu_to_le32(arg->n_channels);
6632
6633 for (i = 0; i < arg->n_channels; i++)
6634 channels->channel_list[i].freq =
6635 __cpu_to_le16(arg->channels[i]);
6636
6637 ptr += sizeof(*channels);
6638 ptr += sizeof(__le32) * arg->n_channels;
6639 }
6640
6641 if (arg->n_ssids) {
6642 ssids = ptr;
6643 ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
6644 ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
6645
6646 for (i = 0; i < arg->n_ssids; i++) {
6647 ssids->ssids[i].ssid_len =
6648 __cpu_to_le32(arg->ssids[i].len);
6649 memcpy(&ssids->ssids[i].ssid,
6650 arg->ssids[i].ssid,
6651 arg->ssids[i].len);
6652 }
6653
6654 ptr += sizeof(*ssids);
6655 ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
6656 }
6657
6658 if (arg->n_bssids) {
6659 bssids = ptr;
6660 bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
6661 bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
6662
6663 for (i = 0; i < arg->n_bssids; i++)
6664 ether_addr_copy(bssids->bssid_list[i].addr,
6665 arg->bssids[i].bssid);
6666
6667 ptr += sizeof(*bssids);
6668 ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
6669 }
6670
6671 if (arg->ie_len) {
6672 ie = ptr;
6673 ie->tag = __cpu_to_le32(WMI_IE_TAG);
6674 ie->ie_len = __cpu_to_le32(arg->ie_len);
6675 memcpy(ie->ie_data, arg->ie, arg->ie_len);
6676
6677 ptr += sizeof(*ie);
6678 ptr += roundup(arg->ie_len, 4);
6679 }
6680 }
6681
6682 static struct sk_buff *
6683 ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
6684 const struct wmi_start_scan_arg *arg)
6685 {
6686 struct wmi_start_scan_cmd *cmd;
6687 struct sk_buff *skb;
6688 size_t len;
6689 int ret;
6690
6691 ret = ath10k_wmi_start_scan_verify(arg);
6692 if (ret)
6693 return ERR_PTR(ret);
6694
6695 len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
6696 skb = ath10k_wmi_alloc_skb(ar, len);
6697 if (!skb)
6698 return ERR_PTR(-ENOMEM);
6699
6700 cmd = (struct wmi_start_scan_cmd *)skb->data;
6701
6702 ath10k_wmi_put_start_scan_common(&cmd->common, arg);
6703 ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
6704
6705 cmd->burst_duration_ms = __cpu_to_le32(0);
6706
6707 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
6708 return skb;
6709 }
6710
6711 static struct sk_buff *
6712 ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
6713 const struct wmi_start_scan_arg *arg)
6714 {
6715 struct wmi_10x_start_scan_cmd *cmd;
6716 struct sk_buff *skb;
6717 size_t len;
6718 int ret;
6719
6720 ret = ath10k_wmi_start_scan_verify(arg);
6721 if (ret)
6722 return ERR_PTR(ret);
6723
6724 len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
6725 skb = ath10k_wmi_alloc_skb(ar, len);
6726 if (!skb)
6727 return ERR_PTR(-ENOMEM);
6728
6729 cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
6730
6731 ath10k_wmi_put_start_scan_common(&cmd->common, arg);
6732 ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
6733
6734 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
6735 return skb;
6736 }
6737
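/* Fill a start-scan argument with commonly used defaults. A typical caller
 * (sketch, assuming the usual mac80211 scan path) initializes the argument
 * here, overrides vdev_id, channels and SSIDs as needed, and then submits
 * it through the per-firmware gen_start_scan op via ath10k_wmi_start_scan().
 */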
6738 void ath10k_wmi_start_scan_init(struct ath10k *ar,
6739 struct wmi_start_scan_arg *arg)
6740 {
6741 /* setup commonly used values */
6742 arg->scan_req_id = 1;
6743 arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
6744 arg->dwell_time_active = 50;
6745 arg->dwell_time_passive = 150;
6746 arg->min_rest_time = 50;
6747 arg->max_rest_time = 500;
6748 arg->repeat_probe_time = 0;
6749 arg->probe_spacing_time = 0;
6750 arg->idle_time = 0;
6751 arg->max_scan_time = 20000;
6752 arg->probe_delay = 5;
6753 arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
6754 | WMI_SCAN_EVENT_COMPLETED
6755 | WMI_SCAN_EVENT_BSS_CHANNEL
6756 | WMI_SCAN_EVENT_FOREIGN_CHANNEL
6757 | WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT
6758 | WMI_SCAN_EVENT_DEQUEUED;
6759 arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
6760 arg->n_bssids = 1;
6761 arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
6762 }
6763
6764 static struct sk_buff *
6765 ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
6766 const struct wmi_stop_scan_arg *arg)
6767 {
6768 struct wmi_stop_scan_cmd *cmd;
6769 struct sk_buff *skb;
6770 u32 scan_id;
6771 u32 req_id;
6772
6773 if (arg->req_id > 0xFFF)
6774 return ERR_PTR(-EINVAL);
6775 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
6776 return ERR_PTR(-EINVAL);
6777
6778 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6779 if (!skb)
6780 return ERR_PTR(-ENOMEM);
6781
6782 scan_id = arg->u.scan_id;
6783 scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
6784
6785 req_id = arg->req_id;
6786 req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
6787
6788 cmd = (struct wmi_stop_scan_cmd *)skb->data;
6789 cmd->req_type = __cpu_to_le32(arg->req_type);
6790 cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
6791 cmd->scan_id = __cpu_to_le32(scan_id);
6792 cmd->scan_req_id = __cpu_to_le32(req_id);
6793
6794 ath10k_dbg(ar, ATH10K_DBG_WMI,
6795 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
6796 arg->req_id, arg->req_type, arg->u.scan_id);
6797 return skb;
6798 }
6799
6800 static struct sk_buff *
6801 ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
6802 enum wmi_vdev_type type,
6803 enum wmi_vdev_subtype subtype,
6804 const u8 macaddr[ETH_ALEN])
6805 {
6806 struct wmi_vdev_create_cmd *cmd;
6807 struct sk_buff *skb;
6808
6809 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6810 if (!skb)
6811 return ERR_PTR(-ENOMEM);
6812
6813 cmd = (struct wmi_vdev_create_cmd *)skb->data;
6814 cmd->vdev_id = __cpu_to_le32(vdev_id);
6815 cmd->vdev_type = __cpu_to_le32(type);
6816 cmd->vdev_subtype = __cpu_to_le32(subtype);
6817 ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
6818
6819 ath10k_dbg(ar, ATH10K_DBG_WMI,
6820 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
6821 vdev_id, type, subtype, macaddr);
6822 return skb;
6823 }
6824
6825 static struct sk_buff *
6826 ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
6827 {
6828 struct wmi_vdev_delete_cmd *cmd;
6829 struct sk_buff *skb;
6830
6831 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6832 if (!skb)
6833 return ERR_PTR(-ENOMEM);
6834
6835 cmd = (struct wmi_vdev_delete_cmd *)skb->data;
6836 cmd->vdev_id = __cpu_to_le32(vdev_id);
6837
6838 ath10k_dbg(ar, ATH10K_DBG_WMI,
6839 "WMI vdev delete id %d\n", vdev_id);
6840 return skb;
6841 }
6842
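/* Build a vdev start or restart request. A hidden SSID requires an SSID to
 * be supplied, the SSID must fit the command's fixed-size field, and the
 * flags are derived from the hidden_ssid/pmf_enabled arguments.
 */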
6843 static struct sk_buff *
6844 ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
6845 const struct wmi_vdev_start_request_arg *arg,
6846 bool restart)
6847 {
6848 struct wmi_vdev_start_request_cmd *cmd;
6849 struct sk_buff *skb;
6850 const char *cmdname;
6851 u32 flags = 0;
6852
6853 if (WARN_ON(arg->hidden_ssid && !arg->ssid))
6854 return ERR_PTR(-EINVAL);
6855 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
6856 return ERR_PTR(-EINVAL);
6857
6858 if (restart)
6859 cmdname = "restart";
6860 else
6861 cmdname = "start";
6862
6863 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6864 if (!skb)
6865 return ERR_PTR(-ENOMEM);
6866
6867 if (arg->hidden_ssid)
6868 flags |= WMI_VDEV_START_HIDDEN_SSID;
6869 if (arg->pmf_enabled)
6870 flags |= WMI_VDEV_START_PMF_ENABLED;
6871
6872 cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
6873 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
6874 cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
6875 cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
6876 cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
6877 cmd->flags = __cpu_to_le32(flags);
6878 cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
6879 cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
6880
6881 if (arg->ssid) {
6882 cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
6883 memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
6884 }
6885
6886 ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);
6887
6888 ath10k_dbg(ar, ATH10K_DBG_WMI,
6889 "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
6890 cmdname, arg->vdev_id,
6891 flags, arg->channel.freq, arg->channel.mode,
6892 cmd->chan.flags, arg->channel.max_power);
6893
6894 return skb;
6895 }
6896
6897 static struct sk_buff *
6898 ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
6899 {
6900 struct wmi_vdev_stop_cmd *cmd;
6901 struct sk_buff *skb;
6902
6903 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6904 if (!skb)
6905 return ERR_PTR(-ENOMEM);
6906
6907 cmd = (struct wmi_vdev_stop_cmd *)skb->data;
6908 cmd->vdev_id = __cpu_to_le32(vdev_id);
6909
6910 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
6911 return skb;
6912 }
6913
6914 static struct sk_buff *
6915 ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
6916 const u8 *bssid)
6917 {
6918 struct wmi_vdev_up_cmd *cmd;
6919 struct sk_buff *skb;
6920
6921 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6922 if (!skb)
6923 return ERR_PTR(-ENOMEM);
6924
6925 cmd = (struct wmi_vdev_up_cmd *)skb->data;
6926 cmd->vdev_id = __cpu_to_le32(vdev_id);
6927 cmd->vdev_assoc_id = __cpu_to_le32(aid);
6928 ether_addr_copy(cmd->vdev_bssid.addr, bssid);
6929
6930 ath10k_dbg(ar, ATH10K_DBG_WMI,
6931 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
6932 vdev_id, aid, bssid);
6933 return skb;
6934 }
6935
6936 static struct sk_buff *
6937 ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
6938 {
6939 struct wmi_vdev_down_cmd *cmd;
6940 struct sk_buff *skb;
6941
6942 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6943 if (!skb)
6944 return ERR_PTR(-ENOMEM);
6945
6946 cmd = (struct wmi_vdev_down_cmd *)skb->data;
6947 cmd->vdev_id = __cpu_to_le32(vdev_id);
6948
6949 ath10k_dbg(ar, ATH10K_DBG_WMI,
6950 "wmi mgmt vdev down id 0x%x\n", vdev_id);
6951 return skb;
6952 }
6953
6954 static struct sk_buff *
6955 ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
6956 u32 param_id, u32 param_value)
6957 {
6958 struct wmi_vdev_set_param_cmd *cmd;
6959 struct sk_buff *skb;
6960
6961 if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
6962 ath10k_dbg(ar, ATH10K_DBG_WMI,
6963 "vdev param %d not supported by firmware\n",
6964 param_id);
6965 return ERR_PTR(-EOPNOTSUPP);
6966 }
6967
6968 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6969 if (!skb)
6970 return ERR_PTR(-ENOMEM);
6971
6972 cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
6973 cmd->vdev_id = __cpu_to_le32(vdev_id);
6974 cmd->param_id = __cpu_to_le32(param_id);
6975 cmd->param_value = __cpu_to_le32(param_value);
6976
6977 ath10k_dbg(ar, ATH10K_DBG_WMI,
6978 "wmi vdev id 0x%x set param %d value %d\n",
6979 vdev_id, param_id, param_value);
6980 return skb;
6981 }
6982
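/* Build an install-key command. Key data must be present for any real
 * cipher and absent for WMI_CIPHER_NONE; key_len bytes of key material are
 * appended after the fixed-size command.
 */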
6983 static struct sk_buff *
6984 ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
6985 const struct wmi_vdev_install_key_arg *arg)
6986 {
6987 struct wmi_vdev_install_key_cmd *cmd;
6988 struct sk_buff *skb;
6989
6990 if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
6991 return ERR_PTR(-EINVAL);
6992 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
6993 return ERR_PTR(-EINVAL);
6994
6995 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
6996 if (!skb)
6997 return ERR_PTR(-ENOMEM);
6998
6999 cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
7000 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
7001 cmd->key_idx = __cpu_to_le32(arg->key_idx);
7002 cmd->key_flags = __cpu_to_le32(arg->key_flags);
7003 cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
7004 cmd->key_len = __cpu_to_le32(arg->key_len);
7005 cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
7006 cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
7007
7008 if (arg->macaddr)
7009 ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
7010 if (arg->key_data)
7011 memcpy(cmd->key_data, arg->key_data, arg->key_len);
7012
7013 ath10k_dbg(ar, ATH10K_DBG_WMI,
7014 "wmi vdev install key idx %d cipher %d len %d\n",
7015 arg->key_idx, arg->key_cipher, arg->key_len);
7016 return skb;
7017 }
7018
7019 static struct sk_buff *
7020 ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
7021 const struct wmi_vdev_spectral_conf_arg *arg)
7022 {
7023 struct wmi_vdev_spectral_conf_cmd *cmd;
7024 struct sk_buff *skb;
7025
7026 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7027 if (!skb)
7028 return ERR_PTR(-ENOMEM);
7029
7030 cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
7031 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
7032 cmd->scan_count = __cpu_to_le32(arg->scan_count);
7033 cmd->scan_period = __cpu_to_le32(arg->scan_period);
7034 cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
7035 cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
7036 cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
7037 cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
7038 cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
7039 cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
7040 cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
7041 cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
7042 cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
7043 cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
7044 cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
7045 cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
7046 cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
7047 cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
7048 cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
7049 cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
7050
7051 return skb;
7052 }
7053
7054 static struct sk_buff *
7055 ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
7056 u32 trigger, u32 enable)
7057 {
7058 struct wmi_vdev_spectral_enable_cmd *cmd;
7059 struct sk_buff *skb;
7060
7061 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7062 if (!skb)
7063 return ERR_PTR(-ENOMEM);
7064
7065 cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
7066 cmd->vdev_id = __cpu_to_le32(vdev_id);
7067 cmd->trigger_cmd = __cpu_to_le32(trigger);
7068 cmd->enable_cmd = __cpu_to_le32(enable);
7069
7070 return skb;
7071 }
7072
7073 static struct sk_buff *
7074 ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
7075 const u8 peer_addr[ETH_ALEN],
7076 enum wmi_peer_type peer_type)
7077 {
7078 struct wmi_peer_create_cmd *cmd;
7079 struct sk_buff *skb;
7080
7081 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7082 if (!skb)
7083 return ERR_PTR(-ENOMEM);
7084
7085 cmd = (struct wmi_peer_create_cmd *)skb->data;
7086 cmd->vdev_id = __cpu_to_le32(vdev_id);
7087 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7088 cmd->peer_type = __cpu_to_le32(peer_type);
7089
7090 ath10k_dbg(ar, ATH10K_DBG_WMI,
7091 "wmi peer create vdev_id %d peer_addr %pM\n",
7092 vdev_id, peer_addr);
7093 return skb;
7094 }
7095
7096 static struct sk_buff *
7097 ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
7098 const u8 peer_addr[ETH_ALEN])
7099 {
7100 struct wmi_peer_delete_cmd *cmd;
7101 struct sk_buff *skb;
7102
7103 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7104 if (!skb)
7105 return ERR_PTR(-ENOMEM);
7106
7107 cmd = (struct wmi_peer_delete_cmd *)skb->data;
7108 cmd->vdev_id = __cpu_to_le32(vdev_id);
7109 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7110
7111 ath10k_dbg(ar, ATH10K_DBG_WMI,
7112 "wmi peer delete vdev_id %d peer_addr %pM\n",
7113 vdev_id, peer_addr);
7114 return skb;
7115 }
7116
7117 static struct sk_buff *
7118 ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
7119 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
7120 {
7121 struct wmi_peer_flush_tids_cmd *cmd;
7122 struct sk_buff *skb;
7123
7124 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7125 if (!skb)
7126 return ERR_PTR(-ENOMEM);
7127
7128 cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
7129 cmd->vdev_id = __cpu_to_le32(vdev_id);
7130 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
7131 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7132
7133 ath10k_dbg(ar, ATH10K_DBG_WMI,
7134 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
7135 vdev_id, peer_addr, tid_bitmap);
7136 return skb;
7137 }
7138
7139 static struct sk_buff *
7140 ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
7141 const u8 *peer_addr,
7142 enum wmi_peer_param param_id,
7143 u32 param_value)
7144 {
7145 struct wmi_peer_set_param_cmd *cmd;
7146 struct sk_buff *skb;
7147
7148 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7149 if (!skb)
7150 return ERR_PTR(-ENOMEM);
7151
7152 cmd = (struct wmi_peer_set_param_cmd *)skb->data;
7153 cmd->vdev_id = __cpu_to_le32(vdev_id);
7154 cmd->param_id = __cpu_to_le32(param_id);
7155 cmd->param_value = __cpu_to_le32(param_value);
7156 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7157
7158 ath10k_dbg(ar, ATH10K_DBG_WMI,
7159 "wmi vdev %d peer 0x%pM set param %d value %d\n",
7160 vdev_id, peer_addr, param_id, param_value);
7161 return skb;
7162 }
7163
7164 static struct sk_buff *
7165 ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
7166 enum wmi_sta_ps_mode psmode)
7167 {
7168 struct wmi_sta_powersave_mode_cmd *cmd;
7169 struct sk_buff *skb;
7170
7171 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7172 if (!skb)
7173 return ERR_PTR(-ENOMEM);
7174
7175 cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
7176 cmd->vdev_id = __cpu_to_le32(vdev_id);
7177 cmd->sta_ps_mode = __cpu_to_le32(psmode);
7178
7179 ath10k_dbg(ar, ATH10K_DBG_WMI,
7180 "wmi set powersave id 0x%x mode %d\n",
7181 vdev_id, psmode);
7182 return skb;
7183 }
7184
7185 static struct sk_buff *
7186 ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
7187 enum wmi_sta_powersave_param param_id,
7188 u32 value)
7189 {
7190 struct wmi_sta_powersave_param_cmd *cmd;
7191 struct sk_buff *skb;
7192
7193 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7194 if (!skb)
7195 return ERR_PTR(-ENOMEM);
7196
7197 cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
7198 cmd->vdev_id = __cpu_to_le32(vdev_id);
7199 cmd->param_id = __cpu_to_le32(param_id);
7200 cmd->param_value = __cpu_to_le32(value);
7201
7202 ath10k_dbg(ar, ATH10K_DBG_WMI,
7203 "wmi sta ps param vdev_id 0x%x param %d value %d\n",
7204 vdev_id, param_id, value);
7205 return skb;
7206 }
7207
7208 static struct sk_buff *
7209 ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
7210 enum wmi_ap_ps_peer_param param_id, u32 value)
7211 {
7212 struct wmi_ap_ps_peer_cmd *cmd;
7213 struct sk_buff *skb;
7214
7215 if (!mac)
7216 return ERR_PTR(-EINVAL);
7217
7218 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7219 if (!skb)
7220 return ERR_PTR(-ENOMEM);
7221
7222 cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
7223 cmd->vdev_id = __cpu_to_le32(vdev_id);
7224 cmd->param_id = __cpu_to_le32(param_id);
7225 cmd->param_value = __cpu_to_le32(value);
7226 ether_addr_copy(cmd->peer_macaddr.addr, mac);
7227
7228 ath10k_dbg(ar, ATH10K_DBG_WMI,
7229 "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
7230 vdev_id, param_id, value, mac);
7231 return skb;
7232 }
7233
7234 static struct sk_buff *
7235 ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
7236 const struct wmi_scan_chan_list_arg *arg)
7237 {
7238 struct wmi_scan_chan_list_cmd *cmd;
7239 struct sk_buff *skb;
7240 struct wmi_channel_arg *ch;
7241 struct wmi_channel *ci;
7242 int len;
7243 int i;
7244
7245 len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
7246
7247 skb = ath10k_wmi_alloc_skb(ar, len);
7248 if (!skb)
7249 return ERR_PTR(-ENOMEM);
7250
7251 cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
7252 cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
7253
7254 for (i = 0; i < arg->n_channels; i++) {
7255 ch = &arg->channels[i];
7256 ci = &cmd->chan_info[i];
7257
7258 ath10k_wmi_put_wmi_channel(ci, ch);
7259 }
7260
7261 return skb;
7262 }
7263
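/* Fill the peer-assoc fields shared by every firmware branch; the
 * branch-specific helpers below only add their extras (HT info padding for
 * main, max MCS/NSS info for 10.2, the rx NSS override for 10.4).
 */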
7264 static void
7265 ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
7266 const struct wmi_peer_assoc_complete_arg *arg)
7267 {
7268 struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
7269
7270 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
7271 cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
7272 cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
7273 cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
7274 cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
7275 cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
7276 cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
7277 cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
7278 cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
7279 cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
7280 cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
7281 cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
7282 cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);
7283
7284 ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
7285
7286 cmd->peer_legacy_rates.num_rates =
7287 __cpu_to_le32(arg->peer_legacy_rates.num_rates);
7288 memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
7289 arg->peer_legacy_rates.num_rates);
7290
7291 cmd->peer_ht_rates.num_rates =
7292 __cpu_to_le32(arg->peer_ht_rates.num_rates);
7293 memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
7294 arg->peer_ht_rates.num_rates);
7295
7296 cmd->peer_vht_rates.rx_max_rate =
7297 __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
7298 cmd->peer_vht_rates.rx_mcs_set =
7299 __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
7300 cmd->peer_vht_rates.tx_max_rate =
7301 __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
7302 cmd->peer_vht_rates.tx_mcs_set =
7303 __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
7304 }
7305
7306 static void
7307 ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
7308 const struct wmi_peer_assoc_complete_arg *arg)
7309 {
7310 struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
7311
7312 ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7313 memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
7314 }
7315
7316 static void
7317 ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
7318 const struct wmi_peer_assoc_complete_arg *arg)
7319 {
7320 ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7321 }
7322
7323 static void
7324 ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
7325 const struct wmi_peer_assoc_complete_arg *arg)
7326 {
7327 struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
7328 int max_mcs, max_nss;
7329 u32 info0;
7330
7331 /* TODO: Is using max values okay with firmware? */
7332 max_mcs = 0xf;
7333 max_nss = 0xf;
7334
7335 info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
7336 SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
7337
7338 ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7339 cmd->info0 = __cpu_to_le32(info0);
7340 }
7341
7342 static void
7343 ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
7344 const struct wmi_peer_assoc_complete_arg *arg)
7345 {
7346 struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
7347
7348 ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
7349 if (arg->peer_bw_rxnss_override)
7350 cmd->peer_bw_rxnss_override =
7351 __cpu_to_le32((arg->peer_bw_rxnss_override - 1) |
7352 BIT(PEER_BW_RXNSS_OVERRIDE_OFFSET));
7353 else
7354 cmd->peer_bw_rxnss_override = 0;
7355 }
7356
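/* Validate a peer-assoc argument against firmware limits before any of the
 * gen_peer_assoc variants allocate a command buffer.
 */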
7357 static int
7358 ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
7359 {
7360 if (arg->peer_mpdu_density > 16)
7361 return -EINVAL;
7362 if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
7363 return -EINVAL;
7364 if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
7365 return -EINVAL;
7366
7367 return 0;
7368 }
7369
7370 static struct sk_buff *
7371 ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
7372 const struct wmi_peer_assoc_complete_arg *arg)
7373 {
7374 size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
7375 struct sk_buff *skb;
7376 int ret;
7377
7378 ret = ath10k_wmi_peer_assoc_check_arg(arg);
7379 if (ret)
7380 return ERR_PTR(ret);
7381
7382 skb = ath10k_wmi_alloc_skb(ar, len);
7383 if (!skb)
7384 return ERR_PTR(-ENOMEM);
7385
7386 ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
7387
7388 ath10k_dbg(ar, ATH10K_DBG_WMI,
7389 "wmi peer assoc vdev %d addr %pM (%s)\n",
7390 arg->vdev_id, arg->addr,
7391 arg->peer_reassoc ? "reassociate" : "new");
7392 return skb;
7393 }
7394
7395 static struct sk_buff *
7396 ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
7397 const struct wmi_peer_assoc_complete_arg *arg)
7398 {
7399 size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
7400 struct sk_buff *skb;
7401 int ret;
7402
7403 ret = ath10k_wmi_peer_assoc_check_arg(arg);
7404 if (ret)
7405 return ERR_PTR(ret);
7406
7407 skb = ath10k_wmi_alloc_skb(ar, len);
7408 if (!skb)
7409 return ERR_PTR(-ENOMEM);
7410
7411 ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
7412
7413 ath10k_dbg(ar, ATH10K_DBG_WMI,
7414 "wmi peer assoc vdev %d addr %pM (%s)\n",
7415 arg->vdev_id, arg->addr,
7416 arg->peer_reassoc ? "reassociate" : "new");
7417 return skb;
7418 }
7419
7420 static struct sk_buff *
7421 ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
7422 const struct wmi_peer_assoc_complete_arg *arg)
7423 {
7424 size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
7425 struct sk_buff *skb;
7426 int ret;
7427
7428 ret = ath10k_wmi_peer_assoc_check_arg(arg);
7429 if (ret)
7430 return ERR_PTR(ret);
7431
7432 skb = ath10k_wmi_alloc_skb(ar, len);
7433 if (!skb)
7434 return ERR_PTR(-ENOMEM);
7435
7436 ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
7437
7438 ath10k_dbg(ar, ATH10K_DBG_WMI,
7439 "wmi peer assoc vdev %d addr %pM (%s)\n",
7440 arg->vdev_id, arg->addr,
7441 arg->peer_reassoc ? "reassociate" : "new");
7442 return skb;
7443 }
7444
7445 static struct sk_buff *
7446 ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar,
7447 const struct wmi_peer_assoc_complete_arg *arg)
7448 {
7449 size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd);
7450 struct sk_buff *skb;
7451 int ret;
7452
7453 ret = ath10k_wmi_peer_assoc_check_arg(arg);
7454 if (ret)
7455 return ERR_PTR(ret);
7456
7457 skb = ath10k_wmi_alloc_skb(ar, len);
7458 if (!skb)
7459 return ERR_PTR(-ENOMEM);
7460
7461 ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg);
7462
7463 ath10k_dbg(ar, ATH10K_DBG_WMI,
7464 "wmi peer assoc vdev %d addr %pM (%s)\n",
7465 arg->vdev_id, arg->addr,
7466 arg->peer_reassoc ? "reassociate" : "new");
7467 return skb;
7468 }
7469
7470 static struct sk_buff *
7471 ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
7472 {
7473 struct sk_buff *skb;
7474
7475 skb = ath10k_wmi_alloc_skb(ar, 0);
7476 if (!skb)
7477 return ERR_PTR(-ENOMEM);
7478
7479 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
7480 return skb;
7481 }
7482
7483 static struct sk_buff *
7484 ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar,
7485 enum wmi_bss_survey_req_type type)
7486 {
7487 struct wmi_pdev_chan_info_req_cmd *cmd;
7488 struct sk_buff *skb;
7489
7490 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7491 if (!skb)
7492 return ERR_PTR(-ENOMEM);
7493
7494 cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
7495 cmd->type = __cpu_to_le32(type);
7496
7497 ath10k_dbg(ar, ATH10K_DBG_WMI,
7498 "wmi pdev bss info request type %d\n", type);
7499
7500 return skb;
7501 }
7502
7503 /* This function assumes the beacon is already DMA mapped */
7504 static struct sk_buff *
7505 ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
7506 size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
7507 bool deliver_cab)
7508 {
7509 struct wmi_bcn_tx_ref_cmd *cmd;
7510 struct sk_buff *skb;
7511 struct ieee80211_hdr *hdr;
7512 u16 fc;
7513
7514 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7515 if (!skb)
7516 return ERR_PTR(-ENOMEM);
7517
7518 hdr = (struct ieee80211_hdr *)bcn;
7519 fc = le16_to_cpu(hdr->frame_control);
7520
7521 cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
7522 cmd->vdev_id = __cpu_to_le32(vdev_id);
7523 cmd->data_len = __cpu_to_le32(bcn_len);
7524 cmd->data_ptr = __cpu_to_le32(bcn_paddr);
7525 cmd->msdu_id = 0;
7526 cmd->frame_control = __cpu_to_le32(fc);
7527 cmd->flags = 0;
7528 cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
7529
7530 if (dtim_zero)
7531 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
7532
7533 if (deliver_cab)
7534 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
7535
7536 return skb;
7537 }
7538
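/* Copy one access category's WMM parameters from the host argument
 * structure into the little-endian on-wire layout used by the firmware.
 */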
7539 void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
7540 const struct wmi_wmm_params_arg *arg)
7541 {
7542 params->cwmin = __cpu_to_le32(arg->cwmin);
7543 params->cwmax = __cpu_to_le32(arg->cwmax);
7544 params->aifs = __cpu_to_le32(arg->aifs);
7545 params->txop = __cpu_to_le32(arg->txop);
7546 params->acm = __cpu_to_le32(arg->acm);
7547 params->no_ack = __cpu_to_le32(arg->no_ack);
7548 }
7549
7550 static struct sk_buff *
7551 ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
7552 const struct wmi_wmm_params_all_arg *arg)
7553 {
7554 struct wmi_pdev_set_wmm_params *cmd;
7555 struct sk_buff *skb;
7556
7557 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7558 if (!skb)
7559 return ERR_PTR(-ENOMEM);
7560
7561 cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
7562 ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
7563 ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
7564 ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
7565 ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
7566
7567 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
7568 return skb;
7569 }
7570
7571 static struct sk_buff *
7572 ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
7573 {
7574 struct wmi_request_stats_cmd *cmd;
7575 struct sk_buff *skb;
7576
7577 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7578 if (!skb)
7579 return ERR_PTR(-ENOMEM);
7580
7581 cmd = (struct wmi_request_stats_cmd *)skb->data;
7582 cmd->stats_id = __cpu_to_le32(stats_mask);
7583
7584 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
7585 stats_mask);
7586 return skb;
7587 }
7588
7589 static struct sk_buff *
7590 ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
7591 enum wmi_force_fw_hang_type type, u32 delay_ms)
7592 {
7593 struct wmi_force_fw_hang_cmd *cmd;
7594 struct sk_buff *skb;
7595
7596 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7597 if (!skb)
7598 return ERR_PTR(-ENOMEM);
7599
7600 cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
7601 cmd->type = __cpu_to_le32(type);
7602 cmd->delay_ms = __cpu_to_le32(delay_ms);
7603
7604 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
7605 type, delay_ms);
7606 return skb;
7607 }
7608
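/* Build a dbglog configuration command. A zero module_enable mask is
 * treated as "restore defaults": all modules are re-enabled at WARN level.
 */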
7609 static struct sk_buff *
7610 ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
7611 u32 log_level)
7612 {
7613 struct wmi_dbglog_cfg_cmd *cmd;
7614 struct sk_buff *skb;
7615 u32 cfg;
7616
7617 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7618 if (!skb)
7619 return ERR_PTR(-ENOMEM);
7620
7621 cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
7622
7623 if (module_enable) {
7624 cfg = SM(log_level,
7625 ATH10K_DBGLOG_CFG_LOG_LVL);
7626 } else {
7627 /* set back defaults, all modules with WARN level */
7628 cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
7629 ATH10K_DBGLOG_CFG_LOG_LVL);
7630 module_enable = ~0;
7631 }
7632
7633 cmd->module_enable = __cpu_to_le32(module_enable);
7634 cmd->module_valid = __cpu_to_le32(~0);
7635 cmd->config_enable = __cpu_to_le32(cfg);
7636 cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
7637
7638 ath10k_dbg(ar, ATH10K_DBG_WMI,
7639 "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
7640 __le32_to_cpu(cmd->module_enable),
7641 __le32_to_cpu(cmd->module_valid),
7642 __le32_to_cpu(cmd->config_enable),
7643 __le32_to_cpu(cmd->config_valid));
7644 return skb;
7645 }
7646
7647 static struct sk_buff *
7648 ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
7649 u32 log_level)
7650 {
7651 struct wmi_10_4_dbglog_cfg_cmd *cmd;
7652 struct sk_buff *skb;
7653 u32 cfg;
7654
7655 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7656 if (!skb)
7657 return ERR_PTR(-ENOMEM);
7658
7659 cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
7660
7661 if (module_enable) {
7662 cfg = SM(log_level,
7663 ATH10K_DBGLOG_CFG_LOG_LVL);
7664 } else {
7665 /* set back defaults, all modules with WARN level */
7666 cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
7667 ATH10K_DBGLOG_CFG_LOG_LVL);
7668 module_enable = ~0;
7669 }
7670
7671 cmd->module_enable = __cpu_to_le64(module_enable);
7672 cmd->module_valid = __cpu_to_le64(~0);
7673 cmd->config_enable = __cpu_to_le32(cfg);
7674 cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
7675
7676 ath10k_dbg(ar, ATH10K_DBG_WMI,
7677 "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
7678 __le64_to_cpu(cmd->module_enable),
7679 __le64_to_cpu(cmd->module_valid),
7680 __le32_to_cpu(cmd->config_enable),
7681 __le32_to_cpu(cmd->config_valid));
7682 return skb;
7683 }
7684
7685 static struct sk_buff *
7686 ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
7687 {
7688 struct wmi_pdev_pktlog_enable_cmd *cmd;
7689 struct sk_buff *skb;
7690
7691 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7692 if (!skb)
7693 return ERR_PTR(-ENOMEM);
7694
7695 ev_bitmap &= ATH10K_PKTLOG_ANY;
7696
7697 cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
7698 cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
7699
7700 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
7701 ev_bitmap);
7702 return skb;
7703 }
7704
7705 static struct sk_buff *
7706 ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
7707 {
7708 struct sk_buff *skb;
7709
7710 skb = ath10k_wmi_alloc_skb(ar, 0);
7711 if (!skb)
7712 return ERR_PTR(-ENOMEM);
7713
7714 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
7715 return skb;
7716 }
7717
7718 static struct sk_buff *
7719 ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
7720 u32 duration, u32 next_offset,
7721 u32 enabled)
7722 {
7723 struct wmi_pdev_set_quiet_cmd *cmd;
7724 struct sk_buff *skb;
7725
7726 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7727 if (!skb)
7728 return ERR_PTR(-ENOMEM);
7729
7730 cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
7731 cmd->period = __cpu_to_le32(period);
7732 cmd->duration = __cpu_to_le32(duration);
7733 cmd->next_start = __cpu_to_le32(next_offset);
7734 cmd->enabled = __cpu_to_le32(enabled);
7735
7736 ath10k_dbg(ar, ATH10K_DBG_WMI,
7737 "wmi quiet param: period %u duration %u enabled %d\n",
7738 period, duration, enabled);
7739 return skb;
7740 }
7741
7742 static struct sk_buff *
7743 ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
7744 const u8 *mac)
7745 {
7746 struct wmi_addba_clear_resp_cmd *cmd;
7747 struct sk_buff *skb;
7748
7749 if (!mac)
7750 return ERR_PTR(-EINVAL);
7751
7752 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7753 if (!skb)
7754 return ERR_PTR(-ENOMEM);
7755
7756 cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
7757 cmd->vdev_id = __cpu_to_le32(vdev_id);
7758 ether_addr_copy(cmd->peer_macaddr.addr, mac);
7759
7760 ath10k_dbg(ar, ATH10K_DBG_WMI,
7761 "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
7762 vdev_id, mac);
7763 return skb;
7764 }
7765
7766 static struct sk_buff *
7767 ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
7768 u32 tid, u32 buf_size)
7769 {
7770 struct wmi_addba_send_cmd *cmd;
7771 struct sk_buff *skb;
7772
7773 if (!mac)
7774 return ERR_PTR(-EINVAL);
7775
7776 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7777 if (!skb)
7778 return ERR_PTR(-ENOMEM);
7779
7780 cmd = (struct wmi_addba_send_cmd *)skb->data;
7781 cmd->vdev_id = __cpu_to_le32(vdev_id);
7782 ether_addr_copy(cmd->peer_macaddr.addr, mac);
7783 cmd->tid = __cpu_to_le32(tid);
7784 cmd->buffersize = __cpu_to_le32(buf_size);
7785
7786 ath10k_dbg(ar, ATH10K_DBG_WMI,
7787 "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
7788 vdev_id, mac, tid, buf_size);
7789 return skb;
7790 }
7791
7792 static struct sk_buff *
7793 ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
7794 u32 tid, u32 status)
7795 {
7796 struct wmi_addba_setresponse_cmd *cmd;
7797 struct sk_buff *skb;
7798
7799 if (!mac)
7800 return ERR_PTR(-EINVAL);
7801
7802 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7803 if (!skb)
7804 return ERR_PTR(-ENOMEM);
7805
7806 cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
7807 cmd->vdev_id = __cpu_to_le32(vdev_id);
7808 ether_addr_copy(cmd->peer_macaddr.addr, mac);
7809 cmd->tid = __cpu_to_le32(tid);
7810 cmd->statuscode = __cpu_to_le32(status);
7811
7812 ath10k_dbg(ar, ATH10K_DBG_WMI,
7813 "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
7814 vdev_id, mac, tid, status);
7815 return skb;
7816 }
7817
7818 static struct sk_buff *
7819 ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
7820 u32 tid, u32 initiator, u32 reason)
7821 {
7822 struct wmi_delba_send_cmd *cmd;
7823 struct sk_buff *skb;
7824
7825 if (!mac)
7826 return ERR_PTR(-EINVAL);
7827
7828 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7829 if (!skb)
7830 return ERR_PTR(-ENOMEM);
7831
7832 cmd = (struct wmi_delba_send_cmd *)skb->data;
7833 cmd->vdev_id = __cpu_to_le32(vdev_id);
7834 ether_addr_copy(cmd->peer_macaddr.addr, mac);
7835 cmd->tid = __cpu_to_le32(tid);
7836 cmd->initiator = __cpu_to_le32(initiator);
7837 cmd->reasoncode = __cpu_to_le32(reason);
7838
7839 ath10k_dbg(ar, ATH10K_DBG_WMI,
7840 "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
7841 vdev_id, mac, tid, initiator, reason);
7842 return skb;
7843 }
7844
7845 static struct sk_buff *
7846 ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
7847 {
7848 struct wmi_pdev_get_tpc_config_cmd *cmd;
7849 struct sk_buff *skb;
7850
7851 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7852 if (!skb)
7853 return ERR_PTR(-ENOMEM);
7854
7855 cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data;
7856 cmd->param = __cpu_to_le32(param);
7857
7858 ath10k_dbg(ar, ATH10K_DBG_WMI,
7859 "wmi pdev get tpc config param %d\n", param);
7860 return skb;
7861 }
7862
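/* Count the entries in the firmware stats lists. The fw_stats_fill
 * routines below call these with ar->data_lock held, so the lists cannot
 * change while they are being walked.
 */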
7863 size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head)
7864 {
7865 struct ath10k_fw_stats_peer *i;
7866 size_t num = 0;
7867
7868 list_for_each_entry(i, head, list)
7869 ++num;
7870
7871 return num;
7872 }
7873
7874 size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head)
7875 {
7876 struct ath10k_fw_stats_vdev *i;
7877 size_t num = 0;
7878
7879 list_for_each_entry(i, head, list)
7880 ++num;
7881
7882 return num;
7883 }
7884
7885 static void
7886 ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
7887 char *buf, u32 *length)
7888 {
7889 u32 len = *length;
7890 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
7891
7892 len += scnprintf(buf + len, buf_len - len, "\n");
7893 len += scnprintf(buf + len, buf_len - len, "%30s\n",
7894 "ath10k PDEV stats");
7895 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7896 "=================");
7897
7898 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7899 "Channel noise floor", pdev->ch_noise_floor);
7900 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7901 "Channel TX power", pdev->chan_tx_power);
7902 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7903 "TX frame count", pdev->tx_frame_count);
7904 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7905 "RX frame count", pdev->rx_frame_count);
7906 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7907 "RX clear count", pdev->rx_clear_count);
7908 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7909 "Cycle count", pdev->cycle_count);
7910 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7911 "PHY error count", pdev->phy_err_count);
7912
7913 *length = len;
7914 }
7915
7916 static void
7917 ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
7918 char *buf, u32 *length)
7919 {
7920 u32 len = *length;
7921 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
7922
7923 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7924 "RTS bad count", pdev->rts_bad);
7925 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7926 "RTS good count", pdev->rts_good);
7927 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7928 "FCS bad count", pdev->fcs_bad);
7929 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7930 "No beacon count", pdev->no_beacons);
7931 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7932 "MIB int count", pdev->mib_int_count);
7933
7934 len += scnprintf(buf + len, buf_len - len, "\n");
7935 *length = len;
7936 }
7937
7938 static void
7939 ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
7940 char *buf, u32 *length)
7941 {
7942 u32 len = *length;
7943 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
7944
7945 len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
7946 "ath10k PDEV TX stats");
7947 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7948 "=================");
7949
7950 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7951 "HTT cookies queued", pdev->comp_queued);
7952 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7953 "HTT cookies disp.", pdev->comp_delivered);
7954 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7955 "MSDU queued", pdev->msdu_enqued);
7956 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7957 "MPDU queued", pdev->mpdu_enqued);
7958 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7959 "MSDUs dropped", pdev->wmm_drop);
7960 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7961 "Local enqued", pdev->local_enqued);
7962 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7963 "Local freed", pdev->local_freed);
7964 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7965 "HW queued", pdev->hw_queued);
7966 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7967 "PPDUs reaped", pdev->hw_reaped);
7968 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7969 "Num underruns", pdev->underrun);
7970 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7971 "PPDUs cleaned", pdev->tx_abort);
7972 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7973 "MPDUs requed", pdev->mpdus_requed);
7974 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7975 "Excessive retries", pdev->tx_ko);
7976 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7977 "HW rate", pdev->data_rc);
7978 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7979 "Sched self triggers", pdev->self_triggers);
7980 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7981 "Dropped due to SW retries",
7982 pdev->sw_retry_failure);
7983 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7984 "Illegal rate phy errors",
7985 pdev->illgl_rate_phy_err);
7986 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7987 "Pdev continuous xretry", pdev->pdev_cont_xretry);
7988 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7989 "TX timeout", pdev->pdev_tx_timeout);
7990 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7991 "PDEV resets", pdev->pdev_resets);
7992 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7993 "PHY underrun", pdev->phy_underrun);
7994 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7995 "MPDU is more than txop limit", pdev->txop_ovf);
7996 *length = len;
7997 }
7998
7999 static void
8000 ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8001 char *buf, u32 *length)
8002 {
8003 u32 len = *length;
8004 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8005
8006 len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
8007 "ath10k PDEV RX stats");
8008 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8009 "=================");
8010
8011 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8012 "Mid PPDU route change",
8013 pdev->mid_ppdu_route_change);
8014 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8015 "Tot. number of statuses", pdev->status_rcvd);
8016 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8017 "Extra frags on rings 0", pdev->r0_frags);
8018 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8019 "Extra frags on rings 1", pdev->r1_frags);
8020 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8021 "Extra frags on rings 2", pdev->r2_frags);
8022 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8023 "Extra frags on rings 3", pdev->r3_frags);
8024 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8025 "MSDUs delivered to HTT", pdev->htt_msdus);
8026 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8027 "MPDUs delivered to HTT", pdev->htt_mpdus);
8028 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8029 "MSDUs delivered to stack", pdev->loc_msdus);
8030 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8031 "MPDUs delivered to stack", pdev->loc_mpdus);
8032 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8033 "Oversized AMSUs", pdev->oversize_amsdu);
8034 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8035 "PHY errors", pdev->phy_errs);
8036 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8037 "PHY errors drops", pdev->phy_err_drop);
8038 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8039 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
8040 *length = len;
8041 }
8042
8043 static void
8044 ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev,
8045 char *buf, u32 *length)
8046 {
8047 u32 len = *length;
8048 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8049 int i;
8050
8051 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8052 "vdev id", vdev->vdev_id);
8053 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8054 "beacon snr", vdev->beacon_snr);
8055 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8056 "data snr", vdev->data_snr);
8057 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8058 "num rx frames", vdev->num_rx_frames);
8059 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8060 "num rts fail", vdev->num_rts_fail);
8061 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8062 "num rts success", vdev->num_rts_success);
8063 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8064 "num rx err", vdev->num_rx_err);
8065 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8066 "num rx discard", vdev->num_rx_discard);
8067 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8068 "num tx not acked", vdev->num_tx_not_acked);
8069
8070 for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
8071 len += scnprintf(buf + len, buf_len - len,
8072 "%25s [%02d] %u\n",
8073 "num tx frames", i,
8074 vdev->num_tx_frames[i]);
8075
8076 for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
8077 len += scnprintf(buf + len, buf_len - len,
8078 "%25s [%02d] %u\n",
8079 "num tx frames retries", i,
8080 vdev->num_tx_frames_retries[i]);
8081
8082 for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
8083 len += scnprintf(buf + len, buf_len - len,
8084 "%25s [%02d] %u\n",
8085 "num tx frames failures", i,
8086 vdev->num_tx_frames_failures[i]);
8087
8088 for (i = 0; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
8089 len += scnprintf(buf + len, buf_len - len,
8090 "%25s [%02d] 0x%08x\n",
8091 "tx rate history", i,
8092 vdev->tx_rate_history[i]);
8093
8094 for (i = 0; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
8095 len += scnprintf(buf + len, buf_len - len,
8096 "%25s [%02d] %u\n",
8097 "beacon rssi history", i,
8098 vdev->beacon_rssi_history[i]);
8099
8100 len += scnprintf(buf + len, buf_len - len, "\n");
8101 *length = len;
8102 }
8103
8104 static void
8105 ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
8106 char *buf, u32 *length)
8107 {
8108 u32 len = *length;
8109 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8110
8111 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
8112 "Peer MAC address", peer->peer_macaddr);
8113 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8114 "Peer RSSI", peer->peer_rssi);
8115 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8116 "Peer TX rate", peer->peer_tx_rate);
8117 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8118 "Peer RX rate", peer->peer_rx_rate);
8119 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8120 "Peer RX duration", peer->rx_duration);
8121
8122 len += scnprintf(buf + len, buf_len - len, "\n");
8123 *length = len;
8124 }
8125
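/* Render the collected firmware statistics into a text buffer of at most
 * ATH10K_FW_STATS_BUF_SIZE bytes. The pdev/vdev/peer lists are walked under
 * ar->data_lock and the result is always NUL terminated, truncating if the
 * buffer fills up.
 */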
8126 void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
8127 struct ath10k_fw_stats *fw_stats,
8128 char *buf)
8129 {
8130 u32 len = 0;
8131 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8132 const struct ath10k_fw_stats_pdev *pdev;
8133 const struct ath10k_fw_stats_vdev *vdev;
8134 const struct ath10k_fw_stats_peer *peer;
8135 size_t num_peers;
8136 size_t num_vdevs;
8137
8138 spin_lock_bh(&ar->data_lock);
8139
8140 pdev = list_first_entry_or_null(&fw_stats->pdevs,
8141 struct ath10k_fw_stats_pdev, list);
8142 if (!pdev) {
8143 ath10k_warn(ar, "failed to get pdev stats\n");
8144 goto unlock;
8145 }
8146
8147 num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
8148 num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
8149
8150 ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8151 ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8152 ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8153
8154 len += scnprintf(buf + len, buf_len - len, "\n");
8155 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8156 "ath10k VDEV stats", num_vdevs);
8157 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8158 "=================");
8159
8160 list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8161 ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
8162 }
8163
8164 len += scnprintf(buf + len, buf_len - len, "\n");
8165 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8166 "ath10k PEER stats", num_peers);
8167 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8168 "=================");
8169
8170 list_for_each_entry(peer, &fw_stats->peers, list) {
8171 ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
8172 }
8173
8174 unlock:
8175 spin_unlock_bh(&ar->data_lock);
8176
8177 if (len >= buf_len)
8178 buf[len - 1] = 0;
8179 else
8180 buf[len] = 0;
8181 }
8182
8183 void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
8184 struct ath10k_fw_stats *fw_stats,
8185 char *buf)
8186 {
8187 unsigned int len = 0;
8188 unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
8189 const struct ath10k_fw_stats_pdev *pdev;
8190 const struct ath10k_fw_stats_vdev *vdev;
8191 const struct ath10k_fw_stats_peer *peer;
8192 size_t num_peers;
8193 size_t num_vdevs;
8194
8195 spin_lock_bh(&ar->data_lock);
8196
8197 pdev = list_first_entry_or_null(&fw_stats->pdevs,
8198 struct ath10k_fw_stats_pdev, list);
8199 if (!pdev) {
8200 ath10k_warn(ar, "failed to get pdev stats\n");
8201 goto unlock;
8202 }
8203
8204 num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
8205 num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
8206
8207 ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8208 ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
8209 ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8210 ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8211
8212 len += scnprintf(buf + len, buf_len - len, "\n");
8213 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8214 "ath10k VDEV stats", num_vdevs);
8215 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8216 "=================");
8217
8218 list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8219 ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
8220 }
8221
8222 len += scnprintf(buf + len, buf_len - len, "\n");
8223 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8224 "ath10k PEER stats", num_peers);
8225 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8226 "=================");
8227
8228 list_for_each_entry(peer, &fw_stats->peers, list) {
8229 ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
8230 }
8231
8232 unlock:
8233 spin_unlock_bh(&ar->data_lock);
8234
8235 if (len >= buf_len)
8236 buf[len - 1] = 0;
8237 else
8238 buf[len] = 0;
8239 }
8240
8241 static struct sk_buff *
8242 ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
8243 u32 detect_level, u32 detect_margin)
8244 {
8245 struct wmi_pdev_set_adaptive_cca_params *cmd;
8246 struct sk_buff *skb;
8247
8248 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8249 if (!skb)
8250 return ERR_PTR(-ENOMEM);
8251
8252 cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data;
8253 cmd->enable = __cpu_to_le32(enable);
8254 cmd->cca_detect_level = __cpu_to_le32(detect_level);
8255 cmd->cca_detect_margin = __cpu_to_le32(detect_margin);
8256
8257 ath10k_dbg(ar, ATH10K_DBG_WMI,
8258 "wmi pdev set adaptive cca params enable:%d detection level:%d detection margin:%d\n",
8259 enable, detect_level, detect_margin);
8260 return skb;
8261 }
8262
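/* 10.4 extended vdev stats. The FTM (fine timing measurement) counters are
 * only printed when the firmware marked them valid via
 * WMI_VDEV_STATS_FTM_COUNT_VALID.
 */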
8263 static void
8264 ath10k_wmi_fw_vdev_stats_extd_fill(const struct ath10k_fw_stats_vdev_extd *vdev,
8265 char *buf, u32 *length)
8266 {
8267 u32 len = *length;
8268 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8269 u32 val;
8270
8271 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8272 "vdev id", vdev->vdev_id);
8273 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8274 "ppdu aggr count", vdev->ppdu_aggr_cnt);
8275 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8276 "ppdu noack", vdev->ppdu_noack);
8277 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8278 "mpdu queued", vdev->mpdu_queued);
8279 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8280 "ppdu nonaggr count", vdev->ppdu_nonaggr_cnt);
8281 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8282 "mpdu sw requeued", vdev->mpdu_sw_requeued);
8283 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8284 "mpdu success retry", vdev->mpdu_suc_retry);
8285 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8286 "mpdu success multitry", vdev->mpdu_suc_multitry);
8287 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8288 "mpdu fail retry", vdev->mpdu_fail_retry);
8289 val = vdev->tx_ftm_suc;
8290 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8291 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8292 "tx ftm success",
8293 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8294 val = vdev->tx_ftm_suc_retry;
8295 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8296 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8297 "tx ftm success retry",
8298 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8299 val = vdev->tx_ftm_fail;
8300 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8301 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8302 "tx ftm fail",
8303 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8304 val = vdev->rx_ftmr_cnt;
8305 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8306 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8307 "rx ftm request count",
8308 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8309 val = vdev->rx_ftmr_dup_cnt;
8310 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8311 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8312 "rx ftm request dup count",
8313 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8314 val = vdev->rx_iftmr_cnt;
8315 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8316 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8317 "rx initial ftm req count",
8318 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8319 val = vdev->rx_iftmr_dup_cnt;
8320 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8321 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8322 "rx initial ftm req dup cnt",
8323 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8324 len += scnprintf(buf + len, buf_len - len, "\n");
8325
8326 *length = len;
8327 }
8328
8329 void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
8330 struct ath10k_fw_stats *fw_stats,
8331 char *buf)
8332 {
8333 u32 len = 0;
8334 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8335 const struct ath10k_fw_stats_pdev *pdev;
8336 const struct ath10k_fw_stats_vdev_extd *vdev;
8337 const struct ath10k_fw_stats_peer *peer;
8338 size_t num_peers;
8339 size_t num_vdevs;
8340
8341 spin_lock_bh(&ar->data_lock);
8342
8343 pdev = list_first_entry_or_null(&fw_stats->pdevs,
8344 struct ath10k_fw_stats_pdev, list);
8345 if (!pdev) {
8346 ath10k_warn(ar, "failed to get pdev stats\n");
8347 goto unlock;
8348 }
8349
8350 num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
8351 num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
8352
8353 ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8354 ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
8355 ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8356
8357 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8358 "HW paused", pdev->hw_paused);
8359 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8360 "Seqs posted", pdev->seq_posted);
8361 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8362 "Seqs failed queueing", pdev->seq_failed_queueing);
8363 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8364 "Seqs completed", pdev->seq_completed);
8365 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8366 "Seqs restarted", pdev->seq_restarted);
8367 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8368 "MU Seqs posted", pdev->mu_seq_posted);
8369 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8370 "MPDUs SW flushed", pdev->mpdus_sw_flush);
8371 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8372 "MPDUs HW filtered", pdev->mpdus_hw_filter);
8373 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8374 "MPDUs truncated", pdev->mpdus_truncated);
8375 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8376 "MPDUs receive no ACK", pdev->mpdus_ack_failed);
8377 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8378 "MPDUs expired", pdev->mpdus_expired);
8379
8380 ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8381 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8382 "Num Rx Overflow errors", pdev->rx_ovfl_errs);
8383
8384 len += scnprintf(buf + len, buf_len - len, "\n");
8385 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8386 "ath10k VDEV stats", num_vdevs);
8387 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8388 "=================");
8389 list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8390 ath10k_wmi_fw_vdev_stats_extd_fill(vdev, buf, &len);
8391 }
8392
8393 len += scnprintf(buf + len, buf_len - len, "\n");
8394 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8395 "ath10k PEER stats", num_peers);
8396 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8397 "=================");
8398
8399 list_for_each_entry(peer, &fw_stats->peers, list) {
8400 ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
8401 }
8402
8403 unlock:
8404 spin_unlock_bh(&ar->data_lock);
8405
8406 if (len >= buf_len)
8407 buf[len - 1] = 0;
8408 else
8409 buf[len] = 0;
8410 }
8411
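/* Translate the generic enum wmi_vdev_subtype into the value understood by
 * this WMI interface revision, or -ENOTSUPP if there is no equivalent.
 */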
8412 int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
8413 enum wmi_vdev_subtype subtype)
8414 {
8415 switch (subtype) {
8416 case WMI_VDEV_SUBTYPE_NONE:
8417 return WMI_VDEV_SUBTYPE_LEGACY_NONE;
8418 case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8419 return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV;
8420 case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8421 return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI;
8422 case WMI_VDEV_SUBTYPE_P2P_GO:
8423 return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO;
8424 case WMI_VDEV_SUBTYPE_PROXY_STA:
8425 return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
8426 case WMI_VDEV_SUBTYPE_MESH_11S:
8427 case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8428 return -ENOTSUPP;
8429 }
8430 return -ENOTSUPP;
8431 }
8432
8433 static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
8434 enum wmi_vdev_subtype subtype)
8435 {
8436 switch (subtype) {
8437 case WMI_VDEV_SUBTYPE_NONE:
8438 return WMI_VDEV_SUBTYPE_10_2_4_NONE;
8439 case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8440 return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV;
8441 case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8442 return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI;
8443 case WMI_VDEV_SUBTYPE_P2P_GO:
8444 return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO;
8445 case WMI_VDEV_SUBTYPE_PROXY_STA:
8446 return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA;
8447 case WMI_VDEV_SUBTYPE_MESH_11S:
8448 return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
8449 case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8450 return -ENOTSUPP;
8451 }
8452 return -ENOTSUPP;
8453 }
8454
8455 static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
8456 enum wmi_vdev_subtype subtype)
8457 {
8458 switch (subtype) {
8459 case WMI_VDEV_SUBTYPE_NONE:
8460 return WMI_VDEV_SUBTYPE_10_4_NONE;
8461 case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8462 return WMI_VDEV_SUBTYPE_10_4_P2P_DEV;
8463 case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8464 return WMI_VDEV_SUBTYPE_10_4_P2P_CLI;
8465 case WMI_VDEV_SUBTYPE_P2P_GO:
8466 return WMI_VDEV_SUBTYPE_10_4_P2P_GO;
8467 case WMI_VDEV_SUBTYPE_PROXY_STA:
8468 return WMI_VDEV_SUBTYPE_10_4_PROXY_STA;
8469 case WMI_VDEV_SUBTYPE_MESH_11S:
8470 return WMI_VDEV_SUBTYPE_10_4_MESH_11S;
8471 case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8472 return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
8473 }
8474 return -ENOTSUPP;
8475 }
8476
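/* 10.4 extended resource configuration: advertise the host platform type
 * and feature bitmap and size the firmware's TDLS tables. The coex GPIO
 * fields are set to -1 (unused) and sleep-STA support is only requested
 * when the firmware advertises WMI_SERVICE_TDLS_UAPSD_SLEEP_STA.
 */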
8477 static struct sk_buff *
8478 ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
8479 enum wmi_host_platform_type type,
8480 u32 fw_feature_bitmap)
8481 {
8482 struct wmi_ext_resource_config_10_4_cmd *cmd;
8483 struct sk_buff *skb;
8484 u32 num_tdls_sleep_sta = 0;
8485
8486 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8487 if (!skb)
8488 return ERR_PTR(-ENOMEM);
8489
8490 if (test_bit(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, ar->wmi.svc_map))
8491 num_tdls_sleep_sta = TARGET_10_4_NUM_TDLS_SLEEP_STA;
8492
8493 cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
8494 cmd->host_platform_config = __cpu_to_le32(type);
8495 cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
8496 cmd->wlan_gpio_priority = __cpu_to_le32(-1);
8497 cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT);
8498 cmd->coex_gpio_pin1 = __cpu_to_le32(-1);
8499 cmd->coex_gpio_pin2 = __cpu_to_le32(-1);
8500 cmd->coex_gpio_pin3 = __cpu_to_le32(-1);
8501 cmd->num_tdls_vdevs = __cpu_to_le32(TARGET_10_4_NUM_TDLS_VDEVS);
8502 cmd->num_tdls_conn_table_entries = __cpu_to_le32(20);
8503 cmd->max_tdls_concurrent_sleep_sta = __cpu_to_le32(num_tdls_sleep_sta);
8504 cmd->max_tdls_concurrent_buffer_sta =
8505 __cpu_to_le32(TARGET_10_4_NUM_TDLS_BUFFER_STA);
8506
8507 ath10k_dbg(ar, ATH10K_DBG_WMI,
8508 "wmi ext resource config host type %d firmware feature bitmap %08x\n",
8509 type, fw_feature_bitmap);
8510 return skb;
8511 }
8512
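/* Enable or disable TDLS in the 10.4 firmware for a vdev. If the firmware
 * only supports explicit TDLS mode, an "enable active" request is
 * downgraded to passive; the remaining fields are fixed driver defaults
 * for discovery, teardown and U-APSD behaviour.
 */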
8513 static struct sk_buff *
8514 ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
8515 enum wmi_tdls_state state)
8516 {
8517 struct wmi_10_4_tdls_set_state_cmd *cmd;
8518 struct sk_buff *skb;
8519 u32 options = 0;
8520
8521 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8522 if (!skb)
8523 return ERR_PTR(-ENOMEM);
8524
8525 if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map) &&
8526 state == WMI_TDLS_ENABLE_ACTIVE)
8527 state = WMI_TDLS_ENABLE_PASSIVE;
8528
8529 if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
8530 options |= WMI_TDLS_BUFFER_STA_EN;
8531
8532 cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data;
8533 cmd->vdev_id = __cpu_to_le32(vdev_id);
8534 cmd->state = __cpu_to_le32(state);
8535 cmd->notification_interval_ms = __cpu_to_le32(5000);
8536 cmd->tx_discovery_threshold = __cpu_to_le32(100);
8537 cmd->tx_teardown_threshold = __cpu_to_le32(5);
8538 cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
8539 cmd->rssi_delta = __cpu_to_le32(-20);
8540 cmd->tdls_options = __cpu_to_le32(options);
8541 cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
8542 cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
8543 cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
8544 cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
8545 cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
8546 cmd->teardown_notification_ms = __cpu_to_le32(10);
8547 cmd->tdls_peer_kickout_threshold = __cpu_to_le32(96);
8548
8549 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi update fw tdls state %d for vdev %i\n",
8550 state, vdev_id);
8551 return skb;
8552 }
8553
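/* Pack a TDLS peer's U-APSD queue flags and maximum service period into
 * the peer_qos word of the peer capabilities structure.
 */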
8554 static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp)
8555 {
8556 u32 peer_qos = 0;
8557
8558 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
8559 peer_qos |= WMI_TDLS_PEER_QOS_AC_VO;
8560 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
8561 peer_qos |= WMI_TDLS_PEER_QOS_AC_VI;
8562 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
8563 peer_qos |= WMI_TDLS_PEER_QOS_AC_BK;
8564 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
8565 peer_qos |= WMI_TDLS_PEER_QOS_AC_BE;
8566
8567 peer_qos |= SM(sp, WMI_TDLS_PEER_SP);
8568
8569 return peer_qos;
8570 }
8571
8572 static struct sk_buff *
8573 ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
8574 {
8575 struct wmi_pdev_get_tpc_table_cmd *cmd;
8576 struct sk_buff *skb;
8577
8578 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8579 if (!skb)
8580 return ERR_PTR(-ENOMEM);
8581
8582 cmd = (struct wmi_pdev_get_tpc_table_cmd *)skb->data;
8583 cmd->param = __cpu_to_le32(param);
8584
8585 ath10k_dbg(ar, ATH10K_DBG_WMI,
8586 "wmi pdev get tpc table param:%d\n", param);
8587 return skb;
8588 }
8589
8590 static struct sk_buff *
8591 ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
8592 const struct wmi_tdls_peer_update_cmd_arg *arg,
8593 const struct wmi_tdls_peer_capab_arg *cap,
8594 const struct wmi_channel_arg *chan_arg)
8595 {
8596 struct wmi_10_4_tdls_peer_update_cmd *cmd;
8597 struct wmi_tdls_peer_capabilities *peer_cap;
8598 struct wmi_channel *chan;
8599 struct sk_buff *skb;
8600 u32 peer_qos;
8601 int len, chan_len;
8602 int i;
8603
8604 /* tdls peer update cmd has a placeholder for one channel */
8605 chan_len = cap->peer_chan_len ? (cap->peer_chan_len - 1) : 0;
8606
8607 len = sizeof(*cmd) + chan_len * sizeof(*chan);
8608
8609 skb = ath10k_wmi_alloc_skb(ar, len);
8610 if (!skb)
8611 return ERR_PTR(-ENOMEM);
8612
8613 memset(skb->data, 0, sizeof(*cmd));
8614
8615 cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
8616 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
8617 ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
8618 cmd->peer_state = __cpu_to_le32(arg->peer_state);
8619
8620 peer_qos = ath10k_wmi_prepare_peer_qos(cap->peer_uapsd_queues,
8621 cap->peer_max_sp);
8622
8623 peer_cap = &cmd->peer_capab;
8624 peer_cap->peer_qos = __cpu_to_le32(peer_qos);
8625 peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
8626 peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
8627 peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
8628 peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
8629 peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
8630 peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
8631
8632 for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
8633 peer_cap->peer_operclass[i] = cap->peer_operclass[i];
8634
8635 peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
8636 peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
8637 peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
8638
8639 for (i = 0; i < cap->peer_chan_len; i++) {
8640 chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i];
8641 ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
8642 }
8643
8644 ath10k_dbg(ar, ATH10K_DBG_WMI,
8645 "wmi tdls peer update vdev %i state %d n_chans %u\n",
8646 arg->vdev_id, arg->peer_state, cap->peer_chan_len);
8647 return skb;
8648 }
8649
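/* Pass the parameters of a host-detected radar match (PRI, pulse width and
 * sidx ranges) to the 10.4 firmware.
 */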
8650 static struct sk_buff *
8651 ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar,
8652 const struct ath10k_radar_found_info *arg)
8653 {
8654 struct wmi_radar_found_info *cmd;
8655 struct sk_buff *skb;
8656
8657 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8658 if (!skb)
8659 return ERR_PTR(-ENOMEM);
8660
8661 cmd = (struct wmi_radar_found_info *)skb->data;
8662 cmd->pri_min = __cpu_to_le32(arg->pri_min);
8663 cmd->pri_max = __cpu_to_le32(arg->pri_max);
8664 cmd->width_min = __cpu_to_le32(arg->width_min);
8665 cmd->width_max = __cpu_to_le32(arg->width_max);
8666 cmd->sidx_min = __cpu_to_le32(arg->sidx_min);
8667 cmd->sidx_max = __cpu_to_le32(arg->sidx_max);
8668
8669 ath10k_dbg(ar, ATH10K_DBG_WMI,
8670 "wmi radar found pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
8671 arg->pri_min, arg->pri_max, arg->width_min,
8672 arg->width_max, arg->sidx_min, arg->sidx_max);
8673 return skb;
8674 }
8675
8676 static struct sk_buff *
8677 ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
8678 {
8679 struct wmi_echo_cmd *cmd;
8680 struct sk_buff *skb;
8681
8682 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8683 if (!skb)
8684 return ERR_PTR(-ENOMEM);
8685
8686 cmd = (struct wmi_echo_cmd *)skb->data;
8687 cmd->value = cpu_to_le32(value);
8688
8689 ath10k_dbg(ar, ATH10K_DBG_WMI,
8690 "wmi echo value 0x%08x\n", value);
8691 return skb;
8692 }
8693
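/* Barrier against in-flight WMI commands: send an echo with a well-known
 * ID and wait for the corresponding echo event to complete ar->wmi.barrier.
 * Returns -ETIMEDOUT if no response arrives within
 * ATH10K_WMI_BARRIER_TIMEOUT_HZ.
 */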
8694 int
8695 ath10k_wmi_barrier(struct ath10k *ar)
8696 {
8697 int ret;
8698 int time_left;
8699
8700 spin_lock_bh(&ar->data_lock);
8701 reinit_completion(&ar->wmi.barrier);
8702 spin_unlock_bh(&ar->data_lock);
8703
8704 ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
8705 if (ret) {
8706 ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
8707 return ret;
8708 }
8709
8710 time_left = wait_for_completion_timeout(&ar->wmi.barrier,
8711 ATH10K_WMI_BARRIER_TIMEOUT_HZ);
8712 if (!time_left)
8713 return -ETIMEDOUT;
8714
8715 return 0;
8716 }
8717
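/* Per-ABI operation tables. Callers are expected to go through the
 * ath10k_wmi_*() wrappers in wmi-ops.h, which dispatch via whichever of
 * these tables was registered for the firmware's WMI interface revision.
 */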
8718 static const struct wmi_ops wmi_ops = {
8719 .rx = ath10k_wmi_op_rx,
8720 .map_svc = wmi_main_svc_map,
8721
8722 .pull_scan = ath10k_wmi_op_pull_scan_ev,
8723 .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
8724 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
8725 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
8726 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
8727 .pull_swba = ath10k_wmi_op_pull_swba_ev,
8728 .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
8729 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
8730 .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
8731 .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
8732 .pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
8733 .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
8734 .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
8735
8736 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
8737 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
8738 .gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
8739 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
8740 .gen_init = ath10k_wmi_op_gen_init,
8741 .gen_start_scan = ath10k_wmi_op_gen_start_scan,
8742 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
8743 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
8744 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
8745 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
8746 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
8747 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
8748 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
8749 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
8750 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
8751 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
8752 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
8753 /* .gen_vdev_wmm_conf not implemented */
8754 .gen_peer_create = ath10k_wmi_op_gen_peer_create,
8755 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
8756 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
8757 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
8758 .gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
8759 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
8760 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
8761 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
8762 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
8763 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
8764 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
8765 .gen_request_stats = ath10k_wmi_op_gen_request_stats,
8766 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
8767 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
8768 .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
8769 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
8770 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
8771 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
8772 /* .gen_pdev_get_temperature not implemented */
8773 .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
8774 .gen_addba_send = ath10k_wmi_op_gen_addba_send,
8775 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
8776 .gen_delba_send = ath10k_wmi_op_gen_delba_send,
8777 .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
8778 .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
8779 .gen_echo = ath10k_wmi_op_gen_echo,
8780 /* .gen_bcn_tmpl not implemented */
8781 /* .gen_prb_tmpl not implemented */
8782 /* .gen_p2p_go_bcn_ie not implemented */
8783 /* .gen_adaptive_qcs not implemented */
8784 /* .gen_pdev_enable_adaptive_cca not implemented */
8785 };
8786
8787 static const struct wmi_ops wmi_10_1_ops = {
8788 .rx = ath10k_wmi_10_1_op_rx,
8789 .map_svc = wmi_10x_svc_map,
8790 .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
8791 .pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
8792 .gen_init = ath10k_wmi_10_1_op_gen_init,
8793 .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
8794 .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
8795 .gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
8796 /* .gen_pdev_get_temperature not implemented */
8797
8798 /* shared with main branch */
8799 .pull_scan = ath10k_wmi_op_pull_scan_ev,
8800 .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
8801 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
8802 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
8803 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
8804 .pull_swba = ath10k_wmi_op_pull_swba_ev,
8805 .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
8806 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
8807 .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
8808 .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
8809 .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
8810
8811 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
8812 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
8813 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
8814 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
8815 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
8816 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
8817 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
8818 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
8819 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
8820 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
8821 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
8822 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
8823 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
8824 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
8825 /* .gen_vdev_wmm_conf not implemented */
8826 .gen_peer_create = ath10k_wmi_op_gen_peer_create,
8827 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
8828 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
8829 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
8830 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
8831 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
8832 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
8833 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
8834 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
8835 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
8836 .gen_request_stats = ath10k_wmi_op_gen_request_stats,
8837 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
8838 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
8839 .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
8840 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
8841 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
8842 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
8843 .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
8844 .gen_addba_send = ath10k_wmi_op_gen_addba_send,
8845 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
8846 .gen_delba_send = ath10k_wmi_op_gen_delba_send,
8847 .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
8848 .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
8849 .gen_echo = ath10k_wmi_op_gen_echo,
8850 /* .gen_bcn_tmpl not implemented */
8851 /* .gen_prb_tmpl not implemented */
8852 /* .gen_p2p_go_bcn_ie not implemented */
8853 /* .gen_adaptive_qcs not implemented */
8854 /* .gen_pdev_enable_adaptive_cca not implemented */
8855 };
8856
static const struct wmi_ops wmi_10_2_ops = {
	.rx = ath10k_wmi_10_2_op_rx,
	.pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
	.gen_init = ath10k_wmi_10_2_op_gen_init,
	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
	/* .gen_pdev_get_temperature not implemented */

	/* shared with 10.1 */
	.map_svc = wmi_10x_svc_map,
	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
	.gen_echo = ath10k_wmi_op_gen_echo,

	.pull_scan = ath10k_wmi_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,

	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
	/* .gen_vdev_wmm_conf not implemented */
	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
	/* .gen_pdev_enable_adaptive_cca not implemented */
};

static const struct wmi_ops wmi_10_2_4_ops = {
	.rx = ath10k_wmi_10_2_op_rx,
	.pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
	.gen_init = ath10k_wmi_10_2_op_gen_init,
	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,

	/* shared with 10.1 */
	.map_svc = wmi_10x_svc_map,
	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
	.gen_echo = ath10k_wmi_op_gen_echo,

	.pull_scan = ath10k_wmi_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,

	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
	.gen_pdev_enable_adaptive_cca =
		ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
	.get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
	/* .gen_bcn_tmpl not implemented */
	/* .gen_prb_tmpl not implemented */
	/* .gen_p2p_go_bcn_ie not implemented */
	/* .gen_adaptive_qcs not implemented */
};

static const struct wmi_ops wmi_10_4_ops = {
	.rx = ath10k_wmi_10_4_op_rx,
	.map_svc = wmi_10_4_svc_map,

	.pull_fw_stats = ath10k_wmi_10_4_op_pull_fw_stats,
	.pull_scan = ath10k_wmi_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr,
	.pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
	.pull_dfs_status_ev = ath10k_wmi_10_4_op_pull_dfs_status_ev,
	.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,

	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
	.gen_init = ath10k_wmi_10_4_op_gen_init,
	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
	.gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc,
	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
	.gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
	.ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
	.gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state,
	.gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update,
	.gen_pdev_get_tpc_table_cmdid =
		ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid,
	.gen_radar_found = ath10k_wmi_10_4_gen_radar_found,

	/* shared with 10.2 */
	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
	.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
	.gen_echo = ath10k_wmi_op_gen_echo,
	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
};
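
/*
 * Illustrative only: the per-version ops tables above are not called
 * directly by the rest of the driver; they are dispatched through the thin
 * static inline wrappers in wmi-ops.h, which pair each ->gen_*() op with
 * the matching command id from ar->wmi.cmd.  A minimal sketch of that
 * pattern (simplified; the real wrapper in wmi-ops.h may differ in detail):
 *
 *	static inline int
 *	ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
 *			   const u8 *bssid)
 *	{
 *		struct sk_buff *skb;
 *
 *		if (!ar->wmi.ops->gen_vdev_up)
 *			return -EOPNOTSUPP;
 *
 *		skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
 *		if (IS_ERR(skb))
 *			return PTR_ERR(skb);
 *
 *		return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
 *	}
 */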
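
/*
 * Select the WMI command map, parameter maps and ops table that match the
 * WMI op version reported by the running firmware, then initialize the
 * completions and work items used while processing WMI events.
 */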
int ath10k_wmi_attach(struct ath10k *ar)
{
	switch (ar->running_fw->fw_file.wmi_op_version) {
	case ATH10K_FW_WMI_OP_VERSION_10_4:
		ar->wmi.ops = &wmi_10_4_ops;
		ar->wmi.cmd = &wmi_10_4_cmd_map;
		ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
		ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
		ar->wmi.cmd = &wmi_10_2_4_cmd_map;
		ar->wmi.ops = &wmi_10_2_4_ops;
		ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
		ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_2:
		ar->wmi.cmd = &wmi_10_2_cmd_map;
		ar->wmi.ops = &wmi_10_2_ops;
		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_1:
		ar->wmi.cmd = &wmi_10x_cmd_map;
		ar->wmi.ops = &wmi_10_1_ops;
		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
		ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
		break;
	case ATH10K_FW_WMI_OP_VERSION_MAIN:
		ar->wmi.cmd = &wmi_cmd_map;
		ar->wmi.ops = &wmi_ops;
		ar->wmi.vdev_param = &wmi_vdev_param_map;
		ar->wmi.pdev_param = &wmi_pdev_param_map;
		ar->wmi.peer_flags = &wmi_peer_flags_map;
		break;
	case ATH10K_FW_WMI_OP_VERSION_TLV:
		ath10k_wmi_tlv_attach(ar);
		break;
	case ATH10K_FW_WMI_OP_VERSION_UNSET:
	case ATH10K_FW_WMI_OP_VERSION_MAX:
		ath10k_err(ar, "unsupported WMI op version: %d\n",
			   ar->running_fw->fw_file.wmi_op_version);
		return -EINVAL;
	}

	init_completion(&ar->wmi.service_ready);
	init_completion(&ar->wmi.unified_ready);
	init_completion(&ar->wmi.barrier);
	init_completion(&ar->wmi.radar_confirm);

	INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
	INIT_WORK(&ar->radar_confirmation_work,
		  ath10k_radar_confirmation_work);

	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
		     ar->running_fw->fw_file.fw_features))
		idr_init(&ar->wmi.mgmt_pending_tx);

	return 0;
}
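
/* Teardown counterpart to the host memory allocation done for the firmware
 * during service ready handling.
 */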
void ath10k_wmi_free_host_mem(struct ath10k *ar)
{
	int i;

	/* free the host memory chunks requested by firmware */
	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
		dma_free_coherent(ar->dev,
				  ar->wmi.mem_chunks[i].len,
				  ar->wmi.mem_chunks[i].vaddr,
				  ar->wmi.mem_chunks[i].paddr);
	}

	ar->wmi.num_mem_chunks = 0;
}
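
/* idr_for_each() callback used at detach time: unmap and free a management
 * frame that is still awaiting a WMI tx completion.
 */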
static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
					       void *ctx)
{
	struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
	struct ath10k *ar = ctx;
	struct sk_buff *msdu;

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "force cleanup mgmt msdu_id %hu\n", msdu_id);

	msdu = pkt_addr->vaddr;
	dma_unmap_single(ar->dev, pkt_addr->paddr,
			 msdu->len, DMA_FROM_DEVICE);
	ieee80211_free_txskb(ar->hw, msdu);

	return 0;
}
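
/*
 * Counterpart to ath10k_wmi_attach(): drop any management frames still
 * tracked in the pending-tx idr (only used with the tx-by-reference
 * firmware feature), cancel the service ready worker and release a held
 * service ready event buffer.
 */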
void ath10k_wmi_detach(struct ath10k *ar)
{
	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
		     ar->running_fw->fw_file.fw_features)) {
		spin_lock_bh(&ar->data_lock);
		idr_for_each(&ar->wmi.mgmt_pending_tx,
			     ath10k_wmi_mgmt_tx_clean_up_pending, ar);
		idr_destroy(&ar->wmi.mgmt_pending_tx);
		spin_unlock_bh(&ar->data_lock);
	}

	cancel_work_sync(&ar->svc_rdy_work);

	if (ar->svc_rdy_skb)
		dev_kfree_skb(ar->svc_rdy_skb);
}