// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include "core.h"
#include "hw.h"
#include "hif.h"
#include "wmi-ops.h"
#include "bmi.h"

const struct ath10k_hw_regs qca988x_regs = {
	.rtc_soc_base_address		= 0x00004000,
	.rtc_wmac_base_address		= 0x00005000,
	.soc_core_base_address		= 0x00009000,
	.wlan_mac_base_address		= 0x00020000,
	.ce_wrapper_base_address	= 0x00057000,
	.ce0_base_address		= 0x00057400,
	.ce1_base_address		= 0x00057800,
	.ce2_base_address		= 0x00057c00,
	.ce3_base_address		= 0x00058000,
	.ce4_base_address		= 0x00058400,
	.ce5_base_address		= 0x00058800,
	.ce6_base_address		= 0x00058c00,
	.ce7_base_address		= 0x00059000,
	.soc_reset_control_si0_rst_mask	= 0x00000001,
	.soc_reset_control_ce_rst_mask	= 0x00040000,
	.soc_chip_id_address		= 0x000000ec,
	.scratch_3_address		= 0x00000030,
	.fw_indicator_address		= 0x00009030,
	.pcie_local_base_address	= 0x00080000,
	.ce_wrap_intr_sum_host_msi_lsb	= 0x00000008,
	.ce_wrap_intr_sum_host_msi_mask	= 0x0000ff00,
	.pcie_intr_fw_mask		= 0x00000400,
	.pcie_intr_ce_mask_all		= 0x0007f800,
	.pcie_intr_clr_address		= 0x00000014,
};

const struct ath10k_hw_regs qca6174_regs = {
	.rtc_soc_base_address			= 0x00000800,
	.rtc_wmac_base_address			= 0x00001000,
	.soc_core_base_address			= 0x0003a000,
	.wlan_mac_base_address			= 0x00010000,
	.ce_wrapper_base_address		= 0x00034000,
	.ce0_base_address			= 0x00034400,
	.ce1_base_address			= 0x00034800,
	.ce2_base_address			= 0x00034c00,
	.ce3_base_address			= 0x00035000,
	.ce4_base_address			= 0x00035400,
	.ce5_base_address			= 0x00035800,
	.ce6_base_address			= 0x00035c00,
	.ce7_base_address			= 0x00036000,
	.soc_reset_control_si0_rst_mask		= 0x00000000,
	.soc_reset_control_ce_rst_mask		= 0x00000001,
	.soc_chip_id_address			= 0x000000f0,
	.scratch_3_address			= 0x00000028,
	.fw_indicator_address			= 0x0003a028,
	.pcie_local_base_address		= 0x00080000,
	.ce_wrap_intr_sum_host_msi_lsb		= 0x00000008,
	.ce_wrap_intr_sum_host_msi_mask		= 0x0000ff00,
	.pcie_intr_fw_mask			= 0x00000400,
	.pcie_intr_ce_mask_all			= 0x0007f800,
	.pcie_intr_clr_address			= 0x00000014,
	.cpu_pll_init_address			= 0x00404020,
	.cpu_speed_address			= 0x00404024,
	.core_clk_div_address			= 0x00404028,
};

const struct ath10k_hw_regs qca99x0_regs = {
	.rtc_soc_base_address			= 0x00080000,
	.rtc_wmac_base_address			= 0x00000000,
	.soc_core_base_address			= 0x00082000,
	.wlan_mac_base_address			= 0x00030000,
	.ce_wrapper_base_address		= 0x0004d000,
	.ce0_base_address			= 0x0004a000,
	.ce1_base_address			= 0x0004a400,
	.ce2_base_address			= 0x0004a800,
	.ce3_base_address			= 0x0004ac00,
	.ce4_base_address			= 0x0004b000,
	.ce5_base_address			= 0x0004b400,
	.ce6_base_address			= 0x0004b800,
	.ce7_base_address			= 0x0004bc00,
	/* Note: qca99x0 supports up to 12 copy engines. Other than the
	 * addresses of CE0 and CE1, no other copy engine is directly
	 * referred to in the code, so there is no need to assign addresses
	 * for the newly supported CEs in this address table.
	 *	Copy Engine		Address
	 *	CE8			0x0004c000
	 *	CE9			0x0004c400
	 *	CE10			0x0004c800
	 *	CE11			0x0004cc00
	 */
	.soc_reset_control_si0_rst_mask		= 0x00000001,
	.soc_reset_control_ce_rst_mask		= 0x00000100,
	.soc_chip_id_address			= 0x000000ec,
	.scratch_3_address			= 0x00040050,
	.fw_indicator_address			= 0x00040050,
	.pcie_local_base_address		= 0x00000000,
	.ce_wrap_intr_sum_host_msi_lsb		= 0x0000000c,
	.ce_wrap_intr_sum_host_msi_mask		= 0x00fff000,
	.pcie_intr_fw_mask			= 0x00100000,
	.pcie_intr_ce_mask_all			= 0x000fff00,
	.pcie_intr_clr_address			= 0x00000010,
};

const struct ath10k_hw_regs qca4019_regs = {
	.rtc_soc_base_address                   = 0x00080000,
	.soc_core_base_address                  = 0x00082000,
	.wlan_mac_base_address                  = 0x00030000,
	.ce_wrapper_base_address                = 0x0004d000,
	.ce0_base_address                       = 0x0004a000,
	.ce1_base_address                       = 0x0004a400,
	.ce2_base_address                       = 0x0004a800,
	.ce3_base_address                       = 0x0004ac00,
	.ce4_base_address                       = 0x0004b000,
	.ce5_base_address                       = 0x0004b400,
	.ce6_base_address                       = 0x0004b800,
	.ce7_base_address                       = 0x0004bc00,
	/* qca4019 supports up to 12 copy engines. Since the base addresses
	 * of CE8 to CE11 are not directly referred to in the code, there is
	 * no need to have them as separate members in this table.
	 *      Copy Engine             Address
	 *      CE8                     0x0004c000
	 *      CE9                     0x0004c400
	 *      CE10                    0x0004c800
	 *      CE11                    0x0004cc00
	 */
	.soc_reset_control_si0_rst_mask         = 0x00000001,
	.soc_reset_control_ce_rst_mask          = 0x00000100,
	.soc_chip_id_address                    = 0x000000ec,
	.fw_indicator_address                   = 0x0004f00c,
	.ce_wrap_intr_sum_host_msi_lsb          = 0x0000000c,
	.ce_wrap_intr_sum_host_msi_mask         = 0x00fff000,
	.pcie_intr_fw_mask                      = 0x00100000,
	.pcie_intr_ce_mask_all                  = 0x000fff00,
	.pcie_intr_clr_address                  = 0x00000010,
};

const struct ath10k_hw_values qca988x_values = {
	.rtc_state_val_on		= 3,
	.ce_count			= 8,
	.msi_assign_ce_max		= 7,
	.num_target_ce_config_wlan	= 7,
	.ce_desc_meta_data_mask		= 0xFFFC,
	.ce_desc_meta_data_lsb		= 2,
};

const struct ath10k_hw_values qca6174_values = {
	.rtc_state_val_on		= 3,
	.ce_count			= 8,
	.msi_assign_ce_max		= 7,
	.num_target_ce_config_wlan	= 7,
	.ce_desc_meta_data_mask		= 0xFFFC,
	.ce_desc_meta_data_lsb		= 2,
};

const struct ath10k_hw_values qca99x0_values = {
	.rtc_state_val_on		= 7,
	.ce_count			= 12,
	.msi_assign_ce_max		= 12,
	.num_target_ce_config_wlan	= 10,
	.ce_desc_meta_data_mask		= 0xFFF0,
	.ce_desc_meta_data_lsb		= 4,
};

const struct ath10k_hw_values qca9888_values = {
	.rtc_state_val_on		= 3,
	.ce_count			= 12,
	.msi_assign_ce_max		= 12,
	.num_target_ce_config_wlan	= 10,
	.ce_desc_meta_data_mask		= 0xFFF0,
	.ce_desc_meta_data_lsb		= 4,
};

const struct ath10k_hw_values qca4019_values = {
	.ce_count                       = 12,
	.num_target_ce_config_wlan      = 10,
	.ce_desc_meta_data_mask         = 0xFFF0,
	.ce_desc_meta_data_lsb          = 4,
};

const struct ath10k_hw_regs wcn3990_regs = {
	.rtc_soc_base_address			= 0x00000000,
	.rtc_wmac_base_address			= 0x00000000,
	.soc_core_base_address			= 0x00000000,
	.ce_wrapper_base_address		= 0x0024C000,
	.ce0_base_address			= 0x00240000,
	.ce1_base_address			= 0x00241000,
	.ce2_base_address			= 0x00242000,
	.ce3_base_address			= 0x00243000,
	.ce4_base_address			= 0x00244000,
	.ce5_base_address			= 0x00245000,
	.ce6_base_address			= 0x00246000,
	.ce7_base_address			= 0x00247000,
	.ce8_base_address			= 0x00248000,
	.ce9_base_address			= 0x00249000,
	.ce10_base_address			= 0x0024A000,
	.ce11_base_address			= 0x0024B000,
	.soc_chip_id_address			= 0x000000f0,
	.soc_reset_control_si0_rst_mask		= 0x00000001,
	.soc_reset_control_ce_rst_mask		= 0x00000100,
	.ce_wrap_intr_sum_host_msi_lsb		= 0x0000000c,
	.ce_wrap_intr_sum_host_msi_mask		= 0x00fff000,
	.pcie_intr_fw_mask			= 0x00100000,
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
	.msb	= 0x00000010,
	.lsb	= 0x00000010,
	.mask	= GENMASK(17, 17),
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
	.msb	= 0x00000012,
	.lsb	= 0x00000012,
	.mask	= GENMASK(18, 18),
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
	.msb	= 0x00000000,
	.lsb	= 0x00000000,
	.mask	= GENMASK(15, 0),
};

static struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
	.addr		= 0x00000018,
	.src_ring	= &wcn3990_src_ring,
	.dst_ring	= &wcn3990_dst_ring,
	.dmax		= &wcn3990_dmax,
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
	.mask	= GENMASK(0, 0),
};

static struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
	.copy_complete	= &wcn3990_host_ie_cc,
};

static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
	.dstr_lmask	= 0x00000010,
	.dstr_hmask	= 0x00000008,
	.srcr_lmask	= 0x00000004,
	.srcr_hmask	= 0x00000002,
	.cc_mask	= 0x00000001,
	.wm_mask	= 0x0000001E,
	.addr		= 0x00000030,
};

static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
	.axi_err	= 0x00000100,
	.dstr_add_err	= 0x00000200,
	.srcr_len_err	= 0x00000100,
	.dstr_mlen_vio	= 0x00000080,
	.dstr_overflow	= 0x00000040,
	.srcr_overflow	= 0x00000020,
	.err_mask	= 0x000003E0,
	.addr		= 0x00000038,
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
	.msb	= 0x00000000,
	.lsb	= 0x00000010,
	.mask	= GENMASK(31, 16),
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
	.msb	= 0x0000000f,
	.lsb	= 0x00000000,
	.mask	= GENMASK(15, 0),
};

static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
	.addr		= 0x0000004c,
	.low_rst	= 0x00000000,
	.high_rst	= 0x00000000,
	.wm_low		= &wcn3990_src_wm_low,
	.wm_high	= &wcn3990_src_wm_high,
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
	.lsb	= 0x00000010,
	.mask	= GENMASK(31, 16),
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
	.msb	= 0x0000000f,
	.lsb	= 0x00000000,
	.mask	= GENMASK(15, 0),
};

static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
	.addr		= 0x00000050,
	.low_rst	= 0x00000000,
	.high_rst	= 0x00000000,
	.wm_low		= &wcn3990_dst_wm_low,
	.wm_high	= &wcn3990_dst_wm_high,
};

static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
	.shift = 19,
	.mask = 0x00080000,
	.enable = 0x00000000,
};

const struct ath10k_hw_ce_regs wcn3990_ce_regs = {
	.sr_base_addr_lo	= 0x00000000,
	.sr_base_addr_hi	= 0x00000004,
	.sr_size_addr		= 0x00000008,
	.dr_base_addr_lo	= 0x0000000c,
	.dr_base_addr_hi	= 0x00000010,
	.dr_size_addr		= 0x00000014,
	.misc_ie_addr		= 0x00000034,
	.sr_wr_index_addr	= 0x0000003c,
	.dst_wr_index_addr	= 0x00000040,
	.current_srri_addr	= 0x00000044,
	.current_drri_addr	= 0x00000048,
	.ce_rri_low		= 0x0024C004,
	.ce_rri_high		= 0x0024C008,
	.host_ie_addr		= 0x0000002c,
	.ctrl1_regs		= &wcn3990_ctrl1,
	.host_ie		= &wcn3990_host_ie,
	.wm_regs		= &wcn3990_wm_reg,
	.misc_regs		= &wcn3990_misc_reg,
	.wm_srcr		= &wcn3990_wm_src_ring,
	.wm_dstr		= &wcn3990_wm_dst_ring,
	.upd			= &wcn3990_ctrl1_upd,
};

const struct ath10k_hw_values wcn3990_values = {
	.rtc_state_val_on		= 5,
	.ce_count			= 12,
	.msi_assign_ce_max		= 12,
	.num_target_ce_config_wlan	= 12,
	.ce_desc_meta_data_mask		= 0xFFF0,
	.ce_desc_meta_data_lsb		= 4,
};

static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
	.msb	= 0x00000010,
	.lsb	= 0x00000010,
	.mask	= GENMASK(16, 16),
};

static struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
	.msb	= 0x00000011,
	.lsb	= 0x00000011,
	.mask	= GENMASK(17, 17),
};

static struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
	.msb	= 0x0000000f,
	.lsb	= 0x00000000,
	.mask	= GENMASK(15, 0),
};

static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
	.addr		= 0x00000010,
	.hw_mask	= 0x0007ffff,
	.sw_mask	= 0x0007ffff,
	.hw_wr_mask	= 0x00000000,
	.sw_wr_mask	= 0x0007ffff,
	.reset_mask	= 0xffffffff,
	.reset		= 0x00000080,
	.src_ring	= &qcax_src_ring,
	.dst_ring	= &qcax_dst_ring,
	.dmax		= &qcax_dmax,
};

static struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
	.msb	= 0x00000003,
	.lsb	= 0x00000003,
	.mask	= GENMASK(3, 3),
};

static struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
	.msb		= 0x00000000,
	.mask		= GENMASK(0, 0),
	.status_reset	= 0x00000000,
	.status		= &qcax_cmd_halt_status,
};

static struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
	.msb	= 0x00000000,
	.lsb	= 0x00000000,
	.mask	= GENMASK(0, 0),
};

static struct ath10k_hw_ce_host_ie qcax_host_ie = {
	.copy_complete_reset	= 0x00000000,
	.copy_complete		= &qcax_host_ie_cc,
};

static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
	.dstr_lmask	= 0x00000010,
	.dstr_hmask	= 0x00000008,
	.srcr_lmask	= 0x00000004,
	.srcr_hmask	= 0x00000002,
	.cc_mask	= 0x00000001,
	.wm_mask	= 0x0000001E,
	.addr		= 0x00000030,
};

static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
	.axi_err	= 0x00000400,
	.dstr_add_err	= 0x00000200,
	.srcr_len_err	= 0x00000100,
	.dstr_mlen_vio	= 0x00000080,
	.dstr_overflow	= 0x00000040,
	.srcr_overflow	= 0x00000020,
	.err_mask	= 0x000007E0,
	.addr		= 0x00000038,
};

static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
	.msb	= 0x0000001f,
	.lsb	= 0x00000010,
	.mask	= GENMASK(31, 16),
};

static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
	.msb	= 0x0000000f,
	.lsb	= 0x00000000,
	.mask	= GENMASK(15, 0),
};

static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
	.addr		= 0x0000004c,
	.low_rst	= 0x00000000,
	.high_rst	= 0x00000000,
	.wm_low		= &qcax_src_wm_low,
	.wm_high	= &qcax_src_wm_high,
};

static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
	.lsb	= 0x00000010,
	.mask	= GENMASK(31, 16),
};

static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
	.msb	= 0x0000000f,
	.lsb	= 0x00000000,
	.mask	= GENMASK(15, 0),
};

static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
	.addr		= 0x00000050,
	.low_rst	= 0x00000000,
	.high_rst	= 0x00000000,
	.wm_low		= &qcax_dst_wm_low,
	.wm_high	= &qcax_dst_wm_high,
};

const struct ath10k_hw_ce_regs qcax_ce_regs = {
	.sr_base_addr_lo	= 0x00000000,
	.sr_size_addr		= 0x00000004,
	.dr_base_addr_lo	= 0x00000008,
	.dr_size_addr		= 0x0000000c,
	.ce_cmd_addr		= 0x00000018,
	.misc_ie_addr		= 0x00000034,
	.sr_wr_index_addr	= 0x0000003c,
	.dst_wr_index_addr	= 0x00000040,
	.current_srri_addr	= 0x00000044,
	.current_drri_addr	= 0x00000048,
	.host_ie_addr		= 0x0000002c,
	.ctrl1_regs		= &qcax_ctrl1,
	.cmd_halt		= &qcax_cmd_halt,
	.host_ie		= &qcax_host_ie,
	.wm_regs		= &qcax_wm_reg,
	.misc_regs		= &qcax_misc_reg,
	.wm_srcr		= &qcax_wm_src_ring,
	.wm_dstr		= &qcax_wm_dst_ring,
};

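/* Editorial note: reference clock parameters for the qca6174 baseband PLL.
 * The table appears to be indexed by the EFUSE_XTAL_SEL field read in
 * ath10k_hw_qca6174_enable_pll_clock() below, so the entry order matters.
 */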
const struct ath10k_hw_clk_params qca6174_clk[ATH10K_HW_REFCLK_COUNT] = {
	{
		.refclk = 48000000,
		.div = 0xe,
		.rnfrac = 0x2aaa8,
		.settle_time = 2400,
		.refdiv = 0,
		.outdiv = 1,
	},
	{
		.refclk = 19200000,
		.div = 0x24,
		.rnfrac = 0x2aaa8,
		.settle_time = 960,
		.refdiv = 0,
		.outdiv = 1,
	},
	{
		.refclk = 24000000,
		.div = 0x1d,
		.rnfrac = 0x15551,
		.settle_time = 1200,
		.refdiv = 0,
		.outdiv = 1,
	},
	{
		.refclk = 26000000,
		.div = 0x1b,
		.rnfrac = 0x4ec4,
		.settle_time = 1300,
		.refdiv = 0,
		.outdiv = 1,
	},
	{
		.refclk = 37400000,
		.div = 0x12,
		.rnfrac = 0x34b49,
		.settle_time = 1870,
		.refdiv = 0,
		.outdiv = 1,
	},
	{
		.refclk = 38400000,
		.div = 0x12,
		.rnfrac = 0x15551,
		.settle_time = 1920,
		.refdiv = 0,
		.outdiv = 1,
	},
	{
		.refclk = 40000000,
		.div = 0x12,
		.rnfrac = 0x26665,
		.settle_time = 2000,
		.refdiv = 0,
		.outdiv = 1,
	},
	{
		.refclk = 52000000,
		.div = 0x1b,
		.rnfrac = 0x4ec4,
		.settle_time = 2600,
		.refdiv = 0,
		.outdiv = 1,
	},
};

void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
				u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
{
	u32 cc_fix = 0;
	u32 rcc_fix = 0;
	enum ath10k_hw_cc_wraparound_type wraparound_type;

	survey->filled |= SURVEY_INFO_TIME |
			  SURVEY_INFO_TIME_BUSY;

	wraparound_type = ar->hw_params.cc_wraparound_type;

	if (cc < cc_prev || rcc < rcc_prev) {
		switch (wraparound_type) {
		case ATH10K_HW_CC_WRAP_SHIFTED_ALL:
			if (cc < cc_prev) {
				cc_fix = 0x7fffffff;
				survey->filled &= ~SURVEY_INFO_TIME_BUSY;
			}
			break;
		case ATH10K_HW_CC_WRAP_SHIFTED_EACH:
			if (cc < cc_prev)
				cc_fix = 0x7fffffff;

			if (rcc < rcc_prev)
				rcc_fix = 0x7fffffff;
			break;
		case ATH10K_HW_CC_WRAP_DISABLED:
			break;
		}
	}

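	/* Editorial note: on the "shifted" wraparound variants the cycle
	 * counter effectively wraps at 0x7fffffff (31 bits), so after a wrap
	 * the elapsed count is cc + (0x7fffffff - cc_prev), which is what
	 * the subtraction below computes once cc_fix has been set.
	 */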
	cc -= cc_prev - cc_fix;
	rcc -= rcc_prev - rcc_fix;

	survey->time = CCNT_TO_MSEC(ar, cc);
	survey->time_busy = CCNT_TO_MSEC(ar, rcc);
}

/* The firmware does not support setting the coverage class. Instead, this
 * function monitors and modifies the corresponding MAC registers.
 */
static void ath10k_hw_qca988x_set_coverage_class(struct ath10k *ar,
						 s16 value)
{
	u32 slottime_reg;
	u32 slottime;
	u32 timeout_reg;
	u32 ack_timeout;
	u32 cts_timeout;
	u32 phyclk_reg;
	u32 phyclk;
	u64 fw_dbglog_mask;
	u32 fw_dbglog_level;

	mutex_lock(&ar->conf_mutex);

	/* Only modify registers if the core is started. */
	if ((ar->state != ATH10K_STATE_ON) &&
	    (ar->state != ATH10K_STATE_RESTARTED)) {
		spin_lock_bh(&ar->data_lock);
		/* Store config value for when radio boots up */
		ar->fw_coverage.coverage_class = value;
		spin_unlock_bh(&ar->data_lock);
		goto unlock;
	}

	/* Retrieve the current values of the two registers that need to be
	 * adjusted.
	 */
	slottime_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
					     WAVE1_PCU_GBL_IFS_SLOT);
	timeout_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
					    WAVE1_PCU_ACK_CTS_TIMEOUT);
	phyclk_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
					   WAVE1_PHYCLK);
	phyclk = MS(phyclk_reg, WAVE1_PHYCLK_USEC) + 1;

	if (value < 0)
		value = ar->fw_coverage.coverage_class;

	/* Break out if the coverage class and registers have the expected
	 * value.
	 */
	if (value == ar->fw_coverage.coverage_class &&
	    slottime_reg == ar->fw_coverage.reg_slottime_conf &&
	    timeout_reg == ar->fw_coverage.reg_ack_cts_timeout_conf &&
	    phyclk_reg == ar->fw_coverage.reg_phyclk)
		goto unlock;

	/* Store new initial register values from the firmware. */
	if (slottime_reg != ar->fw_coverage.reg_slottime_conf)
		ar->fw_coverage.reg_slottime_orig = slottime_reg;
	if (timeout_reg != ar->fw_coverage.reg_ack_cts_timeout_conf)
		ar->fw_coverage.reg_ack_cts_timeout_orig = timeout_reg;
	ar->fw_coverage.reg_phyclk = phyclk_reg;

	/* Calculate new value based on the (original) firmware calculation. */
	slottime_reg = ar->fw_coverage.reg_slottime_orig;
	timeout_reg = ar->fw_coverage.reg_ack_cts_timeout_orig;

	/* Do some sanity checks on the slottime register. */
	if (slottime_reg % phyclk) {
		ath10k_warn(ar,
			    "failed to set coverage class: expected integer microsecond value in register\n");

		goto store_regs;
	}

	slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
	slottime = slottime / phyclk;
	if (slottime != 9 && slottime != 20) {
		ath10k_warn(ar,
			    "failed to set coverage class: expected slot time of 9 or 20us in HW register. It is %uus.\n",
			    slottime);

		goto store_regs;
	}

	/* Recalculate the register values by adding the additional propagation
	 * delay (3us per coverage class).
	 */

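	/* Worked example (editorial, values assumed): with phyclk = 44
	 * ticks/us and a 9 us slot (396 ticks), coverage class 2 adds
	 * 2 * 3 us = 264 ticks, giving a 15 us slot before the
	 * WAVE1_PCU_GBL_IFS_SLOT_MAX clamp is applied. The ACK and CTS
	 * timeouts below grow by the same amount.
	 */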
	slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
	slottime += value * 3 * phyclk;
	slottime = min_t(u32, slottime, WAVE1_PCU_GBL_IFS_SLOT_MAX);
	slottime = SM(slottime, WAVE1_PCU_GBL_IFS_SLOT);
	slottime_reg = (slottime_reg & ~WAVE1_PCU_GBL_IFS_SLOT_MASK) | slottime;

	/* Update ack timeout (lower halfword). */
	ack_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);
	ack_timeout += 3 * value * phyclk;
	ack_timeout = min_t(u32, ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
	ack_timeout = SM(ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);

	/* Update cts timeout (upper halfword). */
	cts_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);
	cts_timeout += 3 * value * phyclk;
	cts_timeout = min_t(u32, cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
	cts_timeout = SM(cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);

	timeout_reg = ack_timeout | cts_timeout;

	ath10k_hif_write32(ar,
			   WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_GBL_IFS_SLOT,
			   slottime_reg);
	ath10k_hif_write32(ar,
			   WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_ACK_CTS_TIMEOUT,
			   timeout_reg);

	/* Ensure the firmware debug log level is set to WARN (or more
	 * verbose) whenever the coverage class is larger than 0. This is
	 * important because we need to set the registers again if the
	 * firmware does an internal reset, and this way we will be notified
	 * of the event.
	 */
	fw_dbglog_mask = ath10k_debug_get_fw_dbglog_mask(ar);
	fw_dbglog_level = ath10k_debug_get_fw_dbglog_level(ar);

	if (value > 0) {
		if (fw_dbglog_level > ATH10K_DBGLOG_LEVEL_WARN)
			fw_dbglog_level = ATH10K_DBGLOG_LEVEL_WARN;
		fw_dbglog_mask = ~0;
	}

	ath10k_wmi_dbglog_cfg(ar, fw_dbglog_mask, fw_dbglog_level);

store_regs:
	/* After an error we will not retry setting the coverage class. */
	spin_lock_bh(&ar->data_lock);
	ar->fw_coverage.coverage_class = value;
	spin_unlock_bh(&ar->data_lock);

	ar->fw_coverage.reg_slottime_conf = slottime_reg;
	ar->fw_coverage.reg_ack_cts_timeout_conf = timeout_reg;

unlock:
	mutex_unlock(&ar->conf_mutex);
}

/**
 * ath10k_hw_qca6174_enable_pll_clock() - enable the qca6174 hw pll clock
 * @ar: the ath10k blob
 *
 * This function is very hardware specific: the clock initialization steps
 * are sensitive and can lead to unpredictable crashes if reordered, so they
 * must be performed in this exact sequence.
 *
 * *** Be aware of this if you plan to refactor them. ***
 *
 * Return: 0 if the PLL was successfully enabled, otherwise -EINVAL
 */
static int ath10k_hw_qca6174_enable_pll_clock(struct ath10k *ar)
{
	int ret, wait_limit;
	u32 clk_div_addr, pll_init_addr, speed_addr;
	u32 addr, reg_val, mem_val;
	struct ath10k_hw_params *hw;
	const struct ath10k_hw_clk_params *hw_clk;

	hw = &ar->hw_params;

	if (ar->regs->core_clk_div_address == 0 ||
	    ar->regs->cpu_pll_init_address == 0 ||
	    ar->regs->cpu_speed_address == 0)
		return -EINVAL;

	clk_div_addr = ar->regs->core_clk_div_address;
	pll_init_addr = ar->regs->cpu_pll_init_address;
	speed_addr = ar->regs->cpu_speed_address;

	/* Read the efuse register to find out the right hw clock configuration */
	addr = (RTC_SOC_BASE_ADDRESS | EFUSE_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	/* Sanity check: bail out if the hw refclk index is out of bounds */
	if (MS(reg_val, EFUSE_XTAL_SEL) >= ATH10K_HW_REFCLK_COUNT)
		return -EINVAL;

	hw_clk = &hw->hw_clk[MS(reg_val, EFUSE_XTAL_SEL)];

	/* Set the rnfrac and outdiv params in the bb_pll register */
	addr = (RTC_SOC_BASE_ADDRESS | BB_PLL_CONFIG_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~(BB_PLL_CONFIG_FRAC_MASK | BB_PLL_CONFIG_OUTDIV_MASK);
	reg_val |= (SM(hw_clk->rnfrac, BB_PLL_CONFIG_FRAC) |
		    SM(hw_clk->outdiv, BB_PLL_CONFIG_OUTDIV));
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Set the correct settle time value in the pll_settle register */
	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_SETTLE_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~WLAN_PLL_SETTLE_TIME_MASK;
	reg_val |= SM(hw_clk->settle_time, WLAN_PLL_SETTLE_TIME);
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Set the clock_ctrl divider in the core_clk_ctrl register */
	addr = (RTC_SOC_BASE_ADDRESS | SOC_CORE_CLK_CTRL_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~SOC_CORE_CLK_CTRL_DIV_MASK;
	reg_val |= SM(1, SOC_CORE_CLK_CTRL_DIV);
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Set the clock_div register */
	mem_val = 1;
	ret = ath10k_bmi_write_memory(ar, clk_div_addr, &mem_val,
				      sizeof(mem_val));
	if (ret)
		return -EINVAL;

	/* Configure the pll_control register */
	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val |= (SM(hw_clk->refdiv, WLAN_PLL_CONTROL_REFDIV) |
		    SM(hw_clk->div, WLAN_PLL_CONTROL_DIV) |
		    SM(1, WLAN_PLL_CONTROL_NOPWD));
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Busy-wait (max 1 s) until the rtc_sync status register indicates
	 * the PLL has stopped changing.
	 */
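	/* 100000 iterations x 10 us delay per iteration gives the ~1 s bound */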
	wait_limit = 100000;
	addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
	do {
		ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
		if (ret)
			return -EINVAL;

		if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
			break;

		wait_limit--;
		udelay(10);

	} while (wait_limit > 0);

	if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
		return -EINVAL;

	/* Clear the pll_bypass bit in the pll_control register */
	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~WLAN_PLL_CONTROL_BYPASS_MASK;
	reg_val |= SM(0, WLAN_PLL_CONTROL_BYPASS);
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Busy-wait (max 1 s) until the rtc_sync status register indicates
	 * the PLL has stopped changing.
	 */
	wait_limit = 100000;
	addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
	do {
		ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
		if (ret)
			return -EINVAL;

		if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
			break;

		wait_limit--;
		udelay(10);

	} while (wait_limit > 0);

	if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
		return -EINVAL;

	/* Enable the hardware cpu clock register */
	addr = (RTC_SOC_BASE_ADDRESS | SOC_CPU_CLOCK_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~SOC_CPU_CLOCK_STANDARD_MASK;
	reg_val |= SM(1, SOC_CPU_CLOCK_STANDARD);
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Clear the nopwd bit in the pll_control register */
	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~WLAN_PLL_CONTROL_NOPWD_MASK;
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Enable PLL initialization via the pll_init register */
	mem_val = 1;
	ret = ath10k_bmi_write_memory(ar, pll_init_addr, &mem_val,
				      sizeof(mem_val));
	if (ret)
		return -EINVAL;

	/* Write the target CPU clock frequency to the speed register */
	ret = ath10k_bmi_write_memory(ar, speed_addr, &hw->target_cpu_freq,
				      sizeof(hw->target_cpu_freq));
	if (ret)
		return -EINVAL;

	return 0;
}

/* Program CPU_ADDR_MSB to allow access to a different memory
 * region.
 */
static void ath10k_hw_map_target_mem(struct ath10k *ar, u32 msb)
{
	u32 address = SOC_CORE_BASE_ADDRESS + FW_RAM_CONFIG_ADDRESS;

	ath10k_hif_write32(ar, address, msb);
}

/* 1. Write to a memory region of the target, such as IRAM and DRAM.
 * 2. Target addresses (0 ~ 0x00100000 and 0x00400000 ~ 0x00500000)
 *    can be written directly. See ath10k_pci_targ_cpu_to_ce_addr() too.
 * 3. In order to access any region other than the above,
 *    the CPU_ADDR_MSB register has to be set first.
 * 4. Target memory access space is limited to 1M in size. If the write is
 *    larger than 1M, it has to be split and CPU_ADDR_MSB programmed
 *    accordingly for each chunk, as shown in the example below.
 */
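/* Illustrative example (editorial, sizes assumed): a 0x180000 byte write
 * starting at 0x00500000 crosses the 1M region boundary at 0x00600000, so
 * ath10k_hw_diag_segment_msb_download() would write the first 0x100000
 * bytes in the current region, bump CPU_ADDR_MSB by one, and then write the
 * remaining 0x80000 bytes at the start of the next region.
 */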
static int ath10k_hw_diag_segment_msb_download(struct ath10k *ar,
					       const void *buffer,
					       u32 address,
					       u32 length)
{
	u32 addr = address & REGION_ACCESS_SIZE_MASK;
	int ret, remain_size, size;
	const u8 *buf;

	ath10k_hw_map_target_mem(ar, CPU_ADDR_MSB_REGION_VAL(address));

	if (addr + length > REGION_ACCESS_SIZE_LIMIT) {
		size = REGION_ACCESS_SIZE_LIMIT - addr;
		remain_size = length - size;

		ret = ath10k_hif_diag_write(ar, address, buffer, size);
		if (ret) {
			ath10k_warn(ar,
				    "failed to download the first %d bytes segment to address:0x%x: %d\n",
				    size, address, ret);
			goto done;
		}

		/* Change msb to the next memory region */
		ath10k_hw_map_target_mem(ar,
					 CPU_ADDR_MSB_REGION_VAL(address) + 1);
		buf = buffer + size;
		ret = ath10k_hif_diag_write(ar,
					    address & ~REGION_ACCESS_SIZE_MASK,
					    buf, remain_size);
		if (ret) {
			ath10k_warn(ar,
				    "failed to download the second %d bytes segment to address:0x%x: %d\n",
				    remain_size,
				    address & ~REGION_ACCESS_SIZE_MASK,
				    ret);
			goto done;
		}
	} else {
		ret = ath10k_hif_diag_write(ar, address, buffer, length);
		if (ret) {
			ath10k_warn(ar,
				    "failed to download the only %d bytes segment to address:0x%x: %d\n",
				    length, address, ret);
			goto done;
		}
	}

done:
	/* Change msb to DRAM */
	ath10k_hw_map_target_mem(ar,
				 CPU_ADDR_MSB_REGION_VAL(DRAM_BASE_ADDRESS));
	return ret;
}

static int ath10k_hw_diag_segment_download(struct ath10k *ar,
					   const void *buffer,
					   u32 address,
					   u32 length)
{
	if (address >= DRAM_BASE_ADDRESS + REGION_ACCESS_SIZE_LIMIT)
		/* Needs to change MSB for memory write */
		return ath10k_hw_diag_segment_msb_download(ar, buffer,
							   address, length);
	else
		return ath10k_hif_diag_write(ar, address, buffer, length);
}

int ath10k_hw_diag_fast_download(struct ath10k *ar,
				 u32 address,
				 const void *buffer,
				 u32 length)
{
	const u8 *buf = buffer;
	bool sgmt_end = false;
	u32 base_addr = 0;
	u32 base_len = 0;
	u32 left = 0;
	struct bmi_segmented_file_header *hdr;
	struct bmi_segmented_metadata *metadata;
	int ret = 0;

	if (length < sizeof(*hdr))
		return -EINVAL;

	/* Check the firmware header. If the magic number is not correct or
	 * the image is compressed, return an error.
	 */
	hdr = (struct bmi_segmented_file_header *)buf;
	if (__le32_to_cpu(hdr->magic_num) != BMI_SGMTFILE_MAGIC_NUM) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "Not a supported firmware, magic_num:0x%x\n",
			   hdr->magic_num);
		return -EINVAL;
	}

	if (hdr->file_flags != 0) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "Not a supported firmware, file_flags:0x%x\n",
			   hdr->file_flags);
		return -EINVAL;
	}

	metadata = (struct bmi_segmented_metadata *)hdr->data;
	left = length - sizeof(*hdr);

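	/* Walk the segment list: each metadata record carries an addr/length
	 * pair followed by the payload, and special length values mark the
	 * firmware start address (BMI_SGMTFILE_BEGINADDR) and the end of the
	 * file (BMI_SGMTFILE_DONE).
	 */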
	while (left > 0) {
		if (left < sizeof(*metadata)) {
			ath10k_warn(ar, "firmware segment is truncated: %d\n",
				    left);
			ret = -EINVAL;
			break;
		}
		base_addr = __le32_to_cpu(metadata->addr);
		base_len = __le32_to_cpu(metadata->length);
		buf = metadata->data;
		left -= sizeof(*metadata);

		switch (base_len) {
		case BMI_SGMTFILE_BEGINADDR:
			/* base_addr is the start address to run */
			ret = ath10k_bmi_set_start(ar, base_addr);
			base_len = 0;
			break;
		case BMI_SGMTFILE_DONE:
			/* no more segments */
			base_len = 0;
			sgmt_end = true;
			ret = 0;
			break;
		case BMI_SGMTFILE_BDDATA:
		case BMI_SGMTFILE_EXEC:
			ath10k_warn(ar,
				    "firmware has unsupported segment:%d\n",
				    base_len);
			ret = -EINVAL;
			break;
		default:
			if (base_len > left) {
				/* sanity check */
				ath10k_warn(ar,
					    "firmware has invalid segment length, %d > %d\n",
					    base_len, left);
				ret = -EINVAL;
				break;
			}

			ret = ath10k_hw_diag_segment_download(ar,
							      buf,
							      base_addr,
							      base_len);

			if (ret)
				ath10k_warn(ar,
					    "failed to download firmware via diag interface:%d\n",
					    ret);
			break;
		}

		if (ret || sgmt_end)
			break;

		metadata = (struct bmi_segmented_metadata *)(buf + base_len);
		left -= base_len;
	}

	if (ret == 0)
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot firmware fast diag download successfully.\n");
	return ret;
}

static int ath10k_htt_tx_rssi_enable(struct htt_resp *resp)
{
	return (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI);
}

static int ath10k_htt_tx_rssi_enable_wcn3990(struct htt_resp *resp)
{
	return (resp->data_tx_completion.flags2 &
		HTT_TX_DATA_RSSI_ENABLE_WCN3990);
}

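/* Editorial note: the pad value computed below is presumably expressed in
 * units of RSSI words (sizeof(extd.msdus_rssi[0])), since each optional
 * field the firmware appends (retries, timestamp) displaces that many RSSI
 * entries in the extended TX completion.
 */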
static int ath10k_get_htt_tx_data_rssi_pad(struct htt_resp *resp)
{
	struct htt_data_tx_completion_ext extd;
	int pad_bytes = 0;

	if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_RETRIES)
		pad_bytes += sizeof(extd.a_retries) /
			     sizeof(extd.msdus_rssi[0]);

	if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_TIMESTAMP)
		pad_bytes += sizeof(extd.t_stamp) / sizeof(extd.msdus_rssi[0]);

	return pad_bytes;
}

const struct ath10k_hw_ops qca988x_ops = {
	.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
};

static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
{
	return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
		  RX_MSDU_END_INFO1_L3_HDR_PAD);
}

static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
{
	return !!(rxd->msdu_end.common.info0 &
		  __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR));
}

const struct ath10k_hw_ops qca99x0_ops = {
	.rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
	.rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
};

const struct ath10k_hw_ops qca6174_ops = {
	.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
	.enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
	.is_rssi_enable = ath10k_htt_tx_rssi_enable,
};

const struct ath10k_hw_ops qca6174_sdio_ops = {
	.enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
};

const struct ath10k_hw_ops wcn3990_ops = {
	.tx_data_rssi_pad_bytes = ath10k_get_htt_tx_data_rssi_pad,
	.is_rssi_enable = ath10k_htt_tx_rssi_enable_wcn3990,
};