// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "coex.h"
#include "debug.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
#include "rtw8852a.h"
#include "rtw8852a_rfk.h"
#include "rtw8852a_rfk_table.h"
#include "rtw8852a_table.h"

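/* Map a PHY index to the RF path mask to calibrate: both paths when DBCC
 * is disabled, otherwise one path per PHY.
 */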
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n",
                    rtwdev->dbcc_en, phy_idx);

        if (!rtwdev->dbcc_en)
                return RF_AB;

        if (phy_idx == RTW89_PHY_0)
                return RF_A;
        else
                return RF_B;
}

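/* BB and RF registers that RF calibration clobbers; they are saved before
 * and restored after a calibration run by the helpers below.
 */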
static const u32 rtw8852a_backup_bb_regs[] = {0x2344, 0x58f0, 0x78f0};
static const u32 rtw8852a_backup_rf_regs[] = {0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5};
#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852a_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852a_backup_rf_regs)

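/* Save/restore helpers for the register lists above; callers provide
 * buffers of BACKUP_BB_REGS_NR / BACKUP_RF_REGS_NR entries.
 */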
static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
        u32 i;

        for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
                backup_bb_reg_val[i] =
                        rtw89_phy_read32_mask(rtwdev, rtw8852a_backup_bb_regs[i],
                                              MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]backup bb reg : %x, value =%x\n",
                            rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]);
        }
}

static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
                               u8 rf_path)
{
        u32 i;

        for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
                backup_rf_reg_val[i] =
                        rtw89_read_rf(rtwdev, rf_path,
                                      rtw8852a_backup_rf_regs[i], RFREG_MASK);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path,
                            rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]);
        }
}

static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
                                u32 backup_bb_reg_val[])
{
        u32 i;

        for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
                rtw89_phy_write32_mask(rtwdev, rtw8852a_backup_bb_regs[i],
                                       MASKDWORD, backup_bb_reg_val[i]);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]restore bb reg : %x, value =%x\n",
                            rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]);
        }
}

static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
                                u32 backup_rf_reg_val[], u8 rf_path)
{
        u32 i;

        for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
                rtw89_write_rf(rtwdev, rf_path, rtw8852a_backup_rf_regs[i],
                               RFREG_MASK, backup_rf_reg_val[i]);

                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path,
                            rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]);
        }
}

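/* Poll every path selected in @kpath (up to 5 ms each, in 2 us steps)
 * until the RR_MOD mode field leaves value 0x2; per the debug string this
 * waits for the path to settle in RX mode.
 */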
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
        u8 path;
        u32 rf_mode;
        int ret;

        for (path = 0; path < RF_PATH_MAX; path++) {
                if (!(kpath & BIT(path)))
                        continue;

                ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
                                               2, 5000, false, rtwdev, path, 0x00,
                                               RR_MOD_MASK);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
                            path, ret);
        }
}

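/* Dump all stored DACK results (ADC/DAC DC offset, bias and the
 * RTW89_DACK_MSBK_NR MSBK codes per I/Q) for both paths.
 */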
static void _dack_dump(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u8 i;
        u8 t;

        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->addck_d[0][0], dack->addck_d[0][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->addck_d[1][0], dack->addck_d[1][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->dadck_d[0][0], dack->dadck_d[0][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->dadck_d[1][0], dack->dadck_d[1][1]);

        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
                    dack->biask_d[0][0], dack->biask_d[0][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
                    dack->biask_d[1][0], dack->biask_d[1][1]);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[0][0][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[0][1][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[1][0][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[1][1][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
}

static void _afe_init(struct rtw89_dev *rtwdev)
{
        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_afe_init_defs_tbl);
}

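/* Latch the ADDCK (ADC DC offset calibration) results of both paths into
 * dack->addck_d[].
 */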
static void _addck_backup(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;

        rtw89_phy_write32_clr(rtwdev, R_S0_RXDC2, B_S0_RXDC2_SEL);
        dack->addck_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK,
                                                         B_S0_ADDCK_Q);
        dack->addck_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK,
                                                         B_S0_ADDCK_I);

        rtw89_phy_write32_clr(rtwdev, R_S1_RXDC2, B_S1_RXDC2_SEL);
        dack->addck_d[1][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK,
                                                         B_S1_ADDCK_Q);
        dack->addck_d[1][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK,
                                                         B_S1_ADDCK_I);
}

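/* Write the saved ADDCK results back into the RX DC compensation
 * registers and enable manual mode; the second value is split, with its
 * upper bits going to RXDC2 and the low 6 bits to RXDC.
 */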
static void _addck_reload(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;

        rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_I, dack->addck_d[0][0]);
        rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2, B_S0_RXDC2_Q2,
                               (dack->addck_d[0][1] >> 6));
        rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_Q,
                               (dack->addck_d[0][1] & 0x3f));
        rtw89_phy_write32_set(rtwdev, R_S0_RXDC2, B_S0_RXDC2_MEN);
        rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_I, dack->addck_d[1][0]);
        rtw89_phy_write32_mask(rtwdev, R_S1_RXDC2, B_S1_RXDC2_Q2,
                               (dack->addck_d[1][1] >> 6));
        rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_Q,
                               (dack->addck_d[1][1] & 0x3f));
        rtw89_phy_write32_set(rtwdev, R_S1_RXDC2, B_S1_RXDC2_EN);
}

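/* Read back the per-path DACK results: one MSBK code per index for I and
 * Q, plus the bias and DAC DC values (the latter stored minus 8).
 */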
static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u8 i;

        rtw89_phy_write32_set(rtwdev, R_S0_DACKI, B_S0_DACKI_EN);
        rtw89_phy_write32_set(rtwdev, R_S0_DACKQ, B_S0_DACKQ_EN);
        rtw89_phy_write32_set(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG);

        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                rtw89_phy_write32_mask(rtwdev, R_S0_DACKI, B_S0_DACKI_AR, i);
                dack->msbk_d[0][0][i] =
                        (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI7, B_S0_DACKI7_K);
                rtw89_phy_write32_mask(rtwdev, R_S0_DACKQ, B_S0_DACKQ_AR, i);
                dack->msbk_d[0][1][i] =
                        (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ7, B_S0_DACKQ7_K);
        }
        dack->biask_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI2,
                                                         B_S0_DACKI2_K);
        dack->biask_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ2,
                                                         B_S0_DACKQ2_K);
        dack->dadck_d[0][0] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI8,
                                                        B_S0_DACKI8_K) - 8;
        dack->dadck_d[0][1] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ8,
                                                        B_S0_DACKQ8_K) - 8;
}

static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u8 i;

        rtw89_phy_write32_set(rtwdev, R_S1_DACKI, B_S1_DACKI_EN);
        rtw89_phy_write32_set(rtwdev, R_S1_DACKQ, B_S1_DACKQ_EN);
        rtw89_phy_write32_set(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON);

        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                rtw89_phy_write32_mask(rtwdev, R_S1_DACKI, B_S1_DACKI_AR, i);
                dack->msbk_d[1][0][i] =
                        (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI7, B_S1_DACKI_K);
                rtw89_phy_write32_mask(rtwdev, R_S1_DACKQ, B_S1_DACKQ_AR, i);
                dack->msbk_d[1][1][i] =
                        (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ7, B_S1_DACKQ7_K);
        }
        dack->biask_d[1][0] =
                (u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI2, B_S1_DACKI2_K);
        dack->biask_d[1][1] =
                (u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ2, B_S1_DACKQ2_K);
        dack->dadck_d[1][0] =
                (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI8, B_S1_DACKI8_K) - 8;
        dack->dadck_d[1][1] =
                (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ8, B_S1_DACKQ8_K) - 8;
}

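/* Pack the saved MSBK/bias/DADCK values into the reload registers at
 * 0x5e14..0x5e24, four MSBK bytes per 32-bit write; index 1 adds an 0x50
 * offset and path B an 0x2000 offset.
 */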
static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
                                 enum rtw89_rf_path path, u8 index)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 tmp = 0, tmp_offset, tmp_reg;
        u8 i;
        u32 idx_offset, path_offset;

        if (index == 0)
                idx_offset = 0;
        else
                idx_offset = 0x50;

        if (path == RF_PATH_A)
                path_offset = 0;
        else
                path_offset = 0x2000;

        tmp_offset = idx_offset + path_offset;
        /* msbk_d: 15/14/13/12 */
        tmp = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
        tmp_reg = 0x5e14 + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
                    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
        /* msbk_d: 11/10/9/8 */
        tmp = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
        tmp_reg = 0x5e18 + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
                    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
        /* msbk_d: 7/6/5/4 */
        tmp = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
        tmp_reg = 0x5e1c + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
                    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
        /* msbk_d: 3/2/1/0 */
        tmp = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                tmp |= dack->msbk_d[path][index][i] << (i * 8);
        tmp_reg = 0x5e20 + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
                    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
        /* dadck_d/biask_d */
        tmp = 0x0;
        tmp = (dack->biask_d[path][index] << 22) |
              (dack->dadck_d[path][index] << 14);
        tmp_reg = 0x5e24 + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
}

static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
        u8 i;

        for (i = 0; i < 2; i++)
                _dack_reload_by_path(rtwdev, path, i);

        rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                 &rtw8852a_rfk_dack_reload_defs_a_tbl,
                                 &rtw8852a_rfk_dack_reload_defs_b_tbl);
}

#define ADDC_T_AVG 100
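/* Average ADDC_T_AVG samples of the DC debug report; the I part sits in
 * bits 23:12 and the Q part in bits 11:0, both 12-bit signed.
 */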
static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
        s32 dc_re = 0, dc_im = 0;
        u32 tmp;
        u32 i;

        rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                 &rtw8852a_rfk_check_addc_defs_a_tbl,
                                 &rtw8852a_rfk_check_addc_defs_b_tbl);

        for (i = 0; i < ADDC_T_AVG; i++) {
                tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD);
                dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11);
                dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11);
        }

        dc_re /= ADDC_T_AVG;
        dc_im /= ADDC_T_AVG;

        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im);
}

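/* ADC DC offset calibration: for each path, reset, measure the residual
 * DC, trigger the one-shot calibration and poll its done bit for up to
 * 10 ms, then re-measure and restore.
 */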
static void _addck(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 val;
        int ret;

        /* S0 */
        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_a_tbl);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n");
        _check_addc(rtwdev, RF_PATH_A);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_a_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x1e00, BIT(0));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
                dack->addck_timeout[0] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n");
        _check_addc(rtwdev, RF_PATH_A);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_a_tbl);

        /* S1 */
        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_b_tbl);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n");
        _check_addc(rtwdev, RF_PATH_B);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_b_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x3e00, BIT(0));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
                dack->addck_timeout[1] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n");
        _check_addc(rtwdev, RF_PATH_B);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_b_tbl);
}

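/* Same DC measurement as _check_addc(), bracketed by the DADC "_f_"
 * (apparently setup) and "_r_" (apparently restore) tables so the DAC
 * contribution to the DC is observed.
 */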
static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
        rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                 &rtw8852a_rfk_check_dadc_defs_f_a_tbl,
                                 &rtw8852a_rfk_check_dadc_defs_f_b_tbl);

        _check_addc(rtwdev, path);

        rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                 &rtw8852a_rfk_check_dadc_defs_r_a_tbl,
                                 &rtw8852a_rfk_check_dadc_defs_r_b_tbl);
}

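/* Per-path DAC calibration: trigger the MSBK stage and poll both done
 * bits (10 ms each), then the DADCK stage likewise, verify via
 * _check_dadc(), and finally back up and reload the results.
 */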
static void _dack_s0(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 val;
        int ret;

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_a_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x5e28, BIT(15));
        ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                        false, rtwdev, 0x5e78, BIT(15));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n");
                dack->msbk_timeout[0] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_a_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x5e48, BIT(17));
        ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                        false, rtwdev, 0x5e98, BIT(17));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADACK timeout\n");
                dack->dadck_timeout[0] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_a_tbl);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");
        _check_dadc(rtwdev, RF_PATH_A);

        _dack_backup_s0(rtwdev);
        _dack_reload(rtwdev, RF_PATH_A);

        rtw89_phy_write32_clr(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG);
}

static void _dack_s1(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 val;
        int ret;

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_b_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x7e28, BIT(15));
        ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                        false, rtwdev, 0x7e78, BIT(15));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n");
                dack->msbk_timeout[1] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_b_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x7e48, BIT(17));
        ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                        false, rtwdev, 0x7e98, BIT(17));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n");
                dack->dadck_timeout[1] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_b_tbl);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");
        _check_dadc(rtwdev, RF_PATH_B);

        _dack_backup_s1(rtwdev);
        _dack_reload(rtwdev, RF_PATH_B);

        rtw89_phy_write32_clr(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON);
}

static void _dack(struct rtw89_dev *rtwdev)
{
        _dack_s0(rtwdev);
        _dack_s1(rtwdev);
}

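/* Full DACK flow: save the RF mode registers, initialize the AFE, run
 * ADDCK and DACK (each wrapped in BTC one-shot notifications), dump the
 * results and restore the original RF state.
 */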
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 rf0_0, rf1_0;
        u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);

        dack->dack_done = false;
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
        rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
        rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
        _afe_init(rtwdev);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x30001);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x30001);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
        _addck(rtwdev);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
        _addck_backup(rtwdev);
        _addck_reload(rtwdev);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x40001);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x40001);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
        _dack(rtwdev);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
        _dack_dump(rtwdev);
        dack->dack_done = true;
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
        dack->dack_cnt++;
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}

#define RTW8852A_NCTL_VER 0xd
#define RTW8852A_IQK_VER 0x2a
#define RTW8852A_IQK_SS 2
#define RTW8852A_IQK_THR_REK 8
#define RTW8852A_IQK_CFIR_GROUP_NR 4

enum rtw8852a_iqk_type {
        ID_TXAGC,
        ID_FLOK_COARSE,
        ID_FLOK_FINE,
        ID_TXK,
        ID_RXAGC,
        ID_RXK,
        ID_NBTXK,
        ID_NBRXK,
};

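/* Debug readback helpers below: dump the IQK FFT report, X/Y/M
 * coefficients, TX/RX CFIR coefficients and the IQ SRAM contents when
 * the corresponding iqk_info->iqk_*_en flags are set.
 */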
static void _iqk_read_fft_dbcc0(struct rtw89_dev *rtwdev, u8 path)
{
        u8 i = 0x0;
        u32 fft[6] = {0x0};

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00160000);
        fft[0] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00170000);
        fft[1] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00180000);
        fft[2] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00190000);
        fft[3] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001a0000);
        fft[4] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001b0000);
        fft[5] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        for (i = 0; i < 6; i++)
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x,fft[%x]= %x\n",
                            path, i, fft[i]);
}

static void _iqk_read_xym_dbcc0(struct rtw89_dev *rtwdev, u8 path)
{
        u8 i = 0x0;
        u32 tmp = 0x0;

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path);
        rtw89_phy_write32_mask(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX, 0x1);

        for (i = 0x0; i < 0x18; i++) {
                rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x000000c0 + i);
                rtw89_phy_write32_clr(rtwdev, R_NCTL_N2, MASKDWORD);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = %x\n",
                            path, BIT(path), tmp);
                udelay(1);
        }
        rtw89_phy_write32_clr(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX);
        rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000);
        rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x80010100);
        udelay(1);
}

static void _iqk_read_txcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path,
                                   u8 group)
{
        static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = {
                {0x8f20, 0x8f54, 0x8f88, 0x8fbc},
                {0x9320, 0x9354, 0x9388, 0x93bc},
        };
        u8 idx = 0x0;
        u32 tmp = 0x0;
        u32 base_addr;

        if (path >= RTW8852A_IQK_SS) {
                rtw89_warn(rtwdev, "cfir path %d out of range\n", path);
                return;
        }
        if (group >= RTW8852A_IQK_CFIR_GROUP_NR) {
                rtw89_warn(rtwdev, "cfir group %d out of range\n", group);
                return;
        }

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001);

        base_addr = base_addrs[path][group];

        for (idx = 0; idx < 0x0d; idx++) {
                tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK] %x = %x\n",
                            base_addr + (idx << 2), tmp);
        }

        if (path == 0x0) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C0, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f50 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C1, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f84 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C2, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fb8 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C3, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fec = %x\n", tmp);
        } else {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C0, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9350 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C1, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9384 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C2, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93b8 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C3, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93ec = %x\n", tmp);
        }
        rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xc);
        udelay(1);
        tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path,
                    BIT(path), tmp);
}

static void _iqk_read_rxcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path,
                                   u8 group)
{
        static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = {
                {0x8d00, 0x8d44, 0x8d88, 0x8dcc},
                {0x9100, 0x9144, 0x9188, 0x91cc},
        };
        u8 idx = 0x0;
        u32 tmp = 0x0;
        u32 base_addr;

        if (path >= RTW8852A_IQK_SS) {
                rtw89_warn(rtwdev, "cfir path %d out of range\n", path);
                return;
        }
        if (group >= RTW8852A_IQK_CFIR_GROUP_NR) {
                rtw89_warn(rtwdev, "cfir group %d out of range\n", group);
                return;
        }

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001);

        base_addr = base_addrs[path][group];
        for (idx = 0; idx < 0x10; idx++) {
                tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]%x = %x\n",
                            base_addr + (idx << 2), tmp);
        }

        if (path == 0x0) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C0, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d40 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C1, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d84 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C2, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8dc8 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C3, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8e0c = %x\n", tmp);
        } else {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C0, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9140 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C1, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9184 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C2, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x91c8 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C3, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x920c = %x\n", tmp);
        }
        rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xd);
        tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path,
                    BIT(path), tmp);
}

static void _iqk_sram(struct rtw89_dev *rtwdev, u8 path)
{
        u32 tmp = 0x0;
        u32 i = 0x0;

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00020000);
        rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000080);
        rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000);
        rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);

        for (i = 0; i <= 0x9f; i++) {
                rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp);
        }

        for (i = 0; i <= 0x9f; i++) {
                rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp);
        }
        rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX2, MASKDWORD);
        rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX, MASKDWORD);
}

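/* Prepare a path for RX IQK: pulse the analog front end and ADC FIFO
 * resets, select the band-specific RXK RF mode and power the RXK PLL
 * back up.
 */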
static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u32 tmp = 0x0;

        rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x3);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x3);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x0);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000);

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2);
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2);
                rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x5);
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
                break;
        default:
                break;
        }
        tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
        rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
        rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
        rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
        rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1);
        fsleep(128);
}

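/* Wait up to 8.2 ms for the NCTL one-shot to signal completion (0x55)
 * and log the report register. Note: this always returns false; failure
 * is judged from the registers logged here.
 */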
static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
{
        u32 tmp;
        u32 val;
        int ret;

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 1, 8200,
                                       false, rtwdev, 0xbff8, MASKBYTE0);
        if (ret)
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n");
        rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
        tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp);

        return false;
}

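/* Build and fire one NCTL one-shot command for @ktype, wait for it via
 * _iqk_check_cal() and run whichever debug readbacks are enabled.
 */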
static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
                          enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        bool fail = false;
        u32 iqk_cmd = 0x0;
        u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path);
        u32 addr_rfc_ctl = 0x0;

        if (path == RF_PATH_A)
                addr_rfc_ctl = 0x5864;
        else
                addr_rfc_ctl = 0x7864;

        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
        switch (ktype) {
        case ID_TXAGC:
                iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
                break;
        case ID_FLOK_COARSE:
                rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
                iqk_cmd = 0x108 | (1 << (4 + path));
                break;
        case ID_FLOK_FINE:
                rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
                iqk_cmd = 0x208 | (1 << (4 + path));
                break;
        case ID_TXK:
                rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025);
                iqk_cmd = 0x008 | (1 << (path + 4)) |
                          (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
                break;
        case ID_RXAGC:
                iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
                break;
        case ID_RXK:
                rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
                iqk_cmd = 0x008 | (1 << (path + 4)) |
                          (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
                break;
        case ID_NBTXK:
                rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025);
                iqk_cmd = 0x308 | (1 << (4 + path));
                break;
        case ID_NBRXK:
                rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
                iqk_cmd = 0x608 | (1 << (4 + path));
                break;
        default:
                return false;
        }

        rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
        rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN);
        udelay(1);
        fail = _iqk_check_cal(rtwdev, path, ktype);
        if (iqk_info->iqk_xym_en)
                _iqk_read_xym_dbcc0(rtwdev, path);
        if (iqk_info->iqk_fft_en)
                _iqk_read_fft_dbcc0(rtwdev, path);
        if (iqk_info->iqk_sram_en)
                _iqk_sram(rtwdev, path);
        if (iqk_info->iqk_cfir_en) {
                if (ktype == ID_TXK) {
                        _iqk_read_txcfir_dbcc0(rtwdev, path, 0x0);
                        _iqk_read_txcfir_dbcc0(rtwdev, path, 0x1);
                        _iqk_read_txcfir_dbcc0(rtwdev, path, 0x2);
                        _iqk_read_txcfir_dbcc0(rtwdev, path, 0x3);
                } else {
                        _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x0);
                        _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x1);
                        _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x2);
                        _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x3);
                }
        }

        rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);

        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);

        return fail;
}

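/* Wideband RX IQK: run a one-shot RXK for each of the four RX gain /
 * attenuation groups of the current band, record per-group fail bits in
 * R_IQKINF, then set B_IQK_RES_RXCFIR to 0x5 and mark the path as
 * wideband-calibrated.
 */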
static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
                           enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        static const u32 rxgn_a[4] = {0x18C, 0x1A0, 0x28C, 0x2A0};
        static const u32 attc2_a[4] = {0x0, 0x0, 0x07, 0x30};
        static const u32 attc1_a[4] = {0x7, 0x5, 0x1, 0x1};
        static const u32 rxgn_g[4] = {0x1CC, 0x1E0, 0x2CC, 0x2E0};
        static const u32 attc2_g[4] = {0x0, 0x15, 0x3, 0x1a};
        static const u32 attc1_g[4] = {0x1, 0x0, 0x1, 0x0};
        u8 gp = 0x0;
        bool fail = false;
        u32 rf0 = 0x0;

        for (gp = 0; gp < 0x4; gp++) {
                switch (iqk_info->iqk_band[path]) {
                case RTW89_BAND_2G:
                        rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_g[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, attc2_g[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, attc1_g[gp]);
                        break;
                case RTW89_BAND_5G:
                        rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_a[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, attc2_a[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, attc1_a[gp]);
                        break;
                default:
                        break;
                }
                rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
                rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI,
                                       rf0 | iqk_info->syn1to2);
                rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100);
                rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR);
                rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
                rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
                rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
                rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN, 0x1);
                rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
                fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
                rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
        }

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
                rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0);
                break;
        default:
                break;
        }
        iqk_info->nb_rxcfir[path] = 0x40000000;
        rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
                               B_IQK_RES_RXCFIR, 0x5);
        iqk_info->is_wb_rxiqk[path] = true;
        return false;
}

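/* Narrowband RX IQK: one fixed gain group instead of four; on success
 * keep the RXIQC readback (with bit 1 set) as nb_rxcfir, otherwise fall
 * back to the default 0x40000002.
 */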
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
                       enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u8 group = 0x0;
        u32 rf0 = 0x0, tmp = 0x0;
        u32 idxrxgain_a = 0x1a0;
        u32 idxattc2_a = 0x00;
        u32 idxattc1_a = 0x5;
        u32 idxrxgain_g = 0x1E0;
        u32 idxattc2_g = 0x15;
        u32 idxattc1_g = 0x0;
        bool fail = false;

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_g);
                rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, idxattc2_g);
                rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, idxattc1_g);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_a);
                rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, idxattc2_a);
                rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, idxattc1_a);
                break;
        default:
                break;
        }
        rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
        rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
        rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI,
                               rf0 | iqk_info->syn1to2);
        rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100);
        rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR);
        rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
        rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
                               B_CFIR_LUT_GP, group);
        rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
        rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
        fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
                rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0);
                break;
        default:
                break;
        }
        if (!fail) {
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
                iqk_info->nb_rxcfir[path] = tmp | 0x2;
        } else {
                iqk_info->nb_rxcfir[path] = 0x40000002;
        }
        return fail;
}

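/* Pick the RX clocking for IQK: 80 MHz channels use a different CFIR
 * system word and, apparently, a different ADC clock divider than the
 * narrower bandwidths.
 */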
static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

        if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
                rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8),
                                       MASKDWORD, 0x4d000a08);
                rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
                                       B_P0_RXCK_VAL, 0x2);
                rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
                rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON);
                rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x1);
        } else {
                rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8),
                                       MASKDWORD, 0x44000a08);
                rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
                                       B_P0_RXCK_VAL, 0x1);
                rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
                rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON);
                rtw89_phy_write32_clr(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL);
        }
}

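/* Wideband TX IQK over four TX gain groups, the TX-side counterpart of
 * _rxk_group_sel(); per-group fail bits land in R_IQKINF bits 8..15.
 */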
static bool _txk_group_sel(struct rtw89_dev *rtwdev,
                           enum rtw89_phy_idx phy_idx, u8 path)
{
        static const u32 a_txgain[4] = {0xE466, 0x646D, 0xE4E2, 0x64ED};
        static const u32 g_txgain[4] = {0x60e8, 0x60f0, 0x61e8, 0x61ED};
        static const u32 a_itqt[4] = {0x12, 0x12, 0x12, 0x1b};
        static const u32 g_itqt[4] = {0x09, 0x12, 0x12, 0x12};
        static const u32 g_attsmxr[4] = {0x0, 0x1, 0x1, 0x1};
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        bool fail = false;
        u8 gp = 0x0;
        u32 tmp = 0x0;

        for (gp = 0x0; gp < 0x4; gp++) {
                switch (iqk_info->iqk_band[path]) {
                case RTW89_BAND_2G:
                        rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
                                               B_RFGAIN_BND, 0x08);
                        rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL,
                                       g_txgain[gp]);
                        rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1,
                                       g_attsmxr[gp]);
                        rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0,
                                       g_attsmxr[gp]);
                        rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                               MASKDWORD, g_itqt[gp]);
                        break;
                case RTW89_BAND_5G:
                        rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
                                               B_RFGAIN_BND, 0x04);
                        rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL,
                                       a_txgain[gp]);
                        rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                               MASKDWORD, a_itqt[gp]);
                        break;
                default:
                        break;
                }
                rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
                rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
                rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
                rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
                                       B_CFIR_LUT_GP, gp);
                rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
                fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
                rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(8 + gp + path * 4), fail);
        }

        iqk_info->nb_txcfir[path] = 0x40000000;
        rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
                               B_IQK_RES_TXCFIR, 0x5);
        iqk_info->is_wb_txiqk[path] = true;
        tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n", path,
                    BIT(path), tmp);
        return false;
}

static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
                       enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u8 group = 0x2;
        u32 a_mode_txgain = 0x64e2;
        u32 g_mode_txgain = 0x61e8;
        u32 attsmxr = 0x1;
        u32 itqt = 0x12;
        u32 tmp = 0x0;
        bool fail = false;

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
                                       B_RFGAIN_BND, 0x08);
                rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, g_mode_txgain);
                rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, attsmxr);
                rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, attsmxr);
                break;
        case RTW89_BAND_5G:
                rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
                                       B_RFGAIN_BND, 0x04);
                rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, a_mode_txgain);
                break;
        default:
                break;
        }
        rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
        rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
        rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, group);
        rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
        rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
        fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
        if (!fail) {
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
                iqk_info->nb_txcfir[path] = tmp | 0x2;
        } else {
                iqk_info->nb_txcfir[path] = 0x40000002;
        }
        tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n", path,
                    BIT(path), tmp);
        return fail;
}

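/* Program the LOK bias current (@ibias) into the RF LUT entry for the
 * current band.
 */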
static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias);
        rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2);
        if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
                rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x0);
        else
                rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x1);
        rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias);
        rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
}

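/* Fine LOK sanity check: the I/Q core codes read back from RR_TXMO must
 * lie within 0x2..0x1d, otherwise the LOK attempt is declared failed.
 */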
static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
{
        bool is_fail = false;
        u32 tmp = 0x0;
        u32 core_i = 0x0;
        u32 core_q = 0x0;

        tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK][FineLOK] S%x, 0x58 = 0x%x\n",
                    path, tmp);
        core_i = FIELD_GET(RR_TXMO_COI, tmp);
        core_q = FIELD_GET(RR_TXMO_COQ, tmp);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, i = 0x%x\n", path, core_i);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, q = 0x%x\n", path, core_q);

        if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
                is_fail = true;
        return is_fail;
}

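/* LO leakage calibration: coarse then fine FLOK one-shots with a
 * band-specific TX gain and itqt setting, validated by
 * _lok_finetune_check().
 */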
static bool _iqk_lok(struct rtw89_dev *rtwdev,
                     enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u32 rf0 = 0x0;
        u8 itqt = 0x12;
        bool fail = false;
        bool tmp = false;

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe5e0);
                itqt = 0x09;
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe4e0);
                itqt = 0x12;
                break;
        default:
                break;
        }
        rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
        rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
        rtw89_phy_write32_mask(rtwdev, R_IQK_DIF1, B_IQK_DIF1_TXPI,
                               rf0 | iqk_info->syn1to2);
        rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, 0x0);
        rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
        rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
        rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
        tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
        iqk_info->lok_cor_fail[0][path] = tmp;
        fsleep(10);
        rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
        tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
        iqk_info->lok_fin_fail[0][path] = tmp;
        fail = _lok_finetune_check(rtwdev, path);
        return fail;
}

static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

        rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000);
        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW, 0x00);
                rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f);
                rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
                rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x1);
                rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
                rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0);
                rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
                rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
                rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x000);
                rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
                rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
                rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
                               0x403e0 | iqk_info->syn1to2);
                udelay(1);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00);
                rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f);
                rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x7);
                rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0);
                rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
                rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
                rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x100);
                rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
                rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
                rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x1);
                rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x0);
                rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
                               0x403e0 | iqk_info->syn1to2);
                udelay(1);
                break;
        default:
                break;
        }
}

static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
        rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
}

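/* Collect per-path IQK bookkeeping: thermal reading, LOK/TX/RX fail
 * flags (mirrored into R_IQKINF), the resulting CFIR/enable registers,
 * and a running fail counter in R_IQKINF2.
 */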
static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
                          u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u32 tmp = 0x0;
        bool flag = 0x0;

        iqk_info->thermal[path] =
                ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
        iqk_info->thermal_rek_en = false;
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %d\n", path,
                    iqk_info->thermal[path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path,
                    iqk_info->lok_cor_fail[0][path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path,
                    iqk_info->lok_fin_fail[0][path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path,
                    iqk_info->iqk_tx_fail[0][path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path,
                    iqk_info->iqk_rx_fail[0][path]);
        flag = iqk_info->lok_cor_fail[0][path];
        rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(0) << (path * 4), flag);
        flag = iqk_info->lok_fin_fail[0][path];
        rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(1) << (path * 4), flag);
        flag = iqk_info->iqk_tx_fail[0][path];
        rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(2) << (path * 4), flag);
        flag = iqk_info->iqk_rx_fail[0][path];
        rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(3) << (path * 4), flag);

        tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
        iqk_info->bp_iqkenable[path] = tmp;
        tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
        iqk_info->bp_txkresult[path] = tmp;
        tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
        iqk_info->bp_rxkresult[path] = tmp;

        rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT,
                               (u8)iqk_info->iqk_times);

        tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, 0x0000000f << (path * 4));
        if (tmp != 0x0)
                iqk_info->iqk_fail_cnt++;
        rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x00ff0000 << (path * 4),
                               iqk_info->iqk_fail_cnt);
}

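/* Per-path IQK sequence: up to three LOK attempts with increasing ibias,
 * then TX IQK and RX IQK, each in narrowband or wideband flavor
 * depending on is_nbiqk, DBCC and band.
 */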
static
void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        bool lok_is_fail = false;
        u8 ibias = 0x1;
        u8 i = 0;

        _iqk_txclk_setting(rtwdev, path);

        for (i = 0; i < 3; i++) {
                _lok_res_table(rtwdev, path, ibias++);
                _iqk_txk_setting(rtwdev, path);
                lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
                if (!lok_is_fail)
                        break;
        }
        if (iqk_info->is_nbiqk)
                iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
        else
                iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);

        _iqk_rxclk_setting(rtwdev, path);
        _iqk_rxk_setting(rtwdev, path);
        if (iqk_info->is_nbiqk || rtwdev->dbcc_en || iqk_info->iqk_band[path] == RTW89_BAND_2G)
                iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
        else
                iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);

        _iqk_info_iqk(rtwdev, phy_idx, path);
}

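/* Capture the current channel into the IQK state: band/bandwidth/channel
 * from the channel context, the SYN selection from bits 11:10 of 0x35c,
 * plus a scan for a free MCC table slot (falling back to round-robin);
 * version and channel info are mirrored into R_IQKINF/R_IQKCH.
 */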
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
                             enum rtw89_phy_idx phy, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
        u32 reg_rf18 = 0x0, reg_35c = 0x0;
        u8 idx = 0;
        u8 get_empty_table = false;

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
                if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
                        get_empty_table = true;
                        break;
                }
        }
        if (!get_empty_table) {
                idx = iqk_info->iqk_table_idx[path] + 1;
                if (idx > RTW89_IQK_CHS_NR - 1)
                        idx = 0;
        }
        reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]cfg ch = %d\n", reg_rf18);
        reg_35c = rtw89_phy_read32_mask(rtwdev, 0x35c, 0x00000c00);

        iqk_info->iqk_band[path] = chan->band_type;
        iqk_info->iqk_bw[path] = chan->band_width;
        iqk_info->iqk_ch[path] = chan->channel;

        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
                    iqk_info->iqk_band[path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n",
                    path, iqk_info->iqk_bw[path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n",
                    path, iqk_info->iqk_ch[path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy,
                    rtwdev->dbcc_en ? "on" : "off",
                    iqk_info->iqk_band[path] == 0 ? "2G" :
                    iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
                    iqk_info->iqk_ch[path],
                    iqk_info->iqk_bw[path] == 0 ? "20M" :
                    iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");
        if (reg_35c == 0x01)
                iqk_info->syn1to2 = 0x1;
        else
                iqk_info->syn1to2 = 0x0;

        rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852A_IQK_VER);
        rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x000f << (path * 16),
                               (u8)iqk_info->iqk_band[path]);
        rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x00f0 << (path * 16),
                               (u8)iqk_info->iqk_bw[path]);
        rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0xff00 << (path * 16),
                               (u8)iqk_info->iqk_ch[path]);

        rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x000000ff, RTW8852A_NCTL_VER);
}

static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
                           u8 path)
{
        _iqk_by_path(rtwdev, phy_idx, path);
}

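/* Restore a path after IQK: write back the narrowband CFIR results and
 * undo the NCTL/KIP and RF register setup.
 */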
_iqk_restore(struct rtw89_dev * rtwdev,u8 path)1424 static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
1425 {
1426 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1427
1428 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
1429 iqk_info->nb_txcfir[path]);
1430 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
1431 iqk_info->nb_rxcfir[path]);
1432 rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD);
1433 rtw89_phy_write32_clr(rtwdev, R_MDPK_RX_DCK, MASKDWORD);
1434 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
1435 rtw89_phy_write32_clr(rtwdev, R_KPATH_CFG, MASKDWORD);
1436 rtw89_phy_write32_clr(rtwdev, R_GAPK, B_GAPK_ADR);
1437 rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000);
1438 rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN);
1439 rtw89_phy_write32_mask(rtwdev, R_CFIR_MAP + (path << 8), MASKDWORD, 0xe4e4e4e4);
1440 rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
1441 rtw89_phy_write32_clr(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW);
1442 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD, 0x00000002);
1443 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
1444 rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x0);
1445 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
1446 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
1447 rtw89_write_rf(rtwdev, path, RR_TXRSV, RR_TXRSV_GAPK, 0x0);
1448 rtw89_write_rf(rtwdev, path, RR_BIAS, RR_BIAS_GAPK, 0x0);
1449 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
1450 }
1451
1452 static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
1453 enum rtw89_phy_idx phy_idx, u8 path)
1454 {
1455 const struct rtw89_rfk_tbl *tbl;
1456
1457 switch (_kpath(rtwdev, phy_idx)) {
1458 case RF_A:
1459 tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path0_tbl;
1460 break;
1461 case RF_B:
1462 tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path1_tbl;
1463 break;
1464 default:
1465 tbl = &rtw8852a_rfk_iqk_restore_defs_nondbcc_path01_tbl;
1466 break;
1467 }
1468
1469 rtw89_rfk_parser(rtwdev, tbl);
1470 }
1471
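/* Prepare the KIP/NCTL blocks for a calibration pass. Under DBCC each
 * path uses its own coefficient bank (path & 0x1); otherwise the table
 * index chosen by _iqk_get_ch_info() selects the bank.
 */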
1472 static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
1473 {
1474 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1475 u8 idx = iqk_info->iqk_table_idx[path];
1476
1477 if (rtwdev->dbcc_en) {
1478 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
1479 B_COEF_SEL_IQC, path & 0x1);
1480 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1481 B_CFIR_LUT_G2, path & 0x1);
1482 } else {
1483 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
1484 B_COEF_SEL_IQC, idx);
1485 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1486 B_CFIR_LUT_G2, idx);
1487 }
1488 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1489 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
1490 rtw89_phy_write32_clr(rtwdev, R_NCTL_RW, MASKDWORD);
1491 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
1492 rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, MASKDWORD, 0x00200000);
1493 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, MASKDWORD, 0x80000000);
1494 rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD);
1495 }
1496
1497 static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
1498 enum rtw89_phy_idx phy_idx, u8 path)
1499 {
1500 const struct rtw89_rfk_tbl *tbl;
1501
1502 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__);
1503
1504 switch (_kpath(rtwdev, phy_idx)) {
1505 case RF_A:
1506 tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path0_tbl;
1507 break;
1508 case RF_B:
1509 tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path1_tbl;
1510 break;
1511 default:
1512 tbl = &rtw8852a_rfk_iqk_set_defs_nondbcc_path01_tbl;
1513 break;
1514 }
1515
1516 rtw89_rfk_parser(rtwdev, tbl);
1517 }
1518
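/* DBCC flavour of the IQK flow: path 0 is served by PHY0 and path 1
 * by PHY1, then the usual get-info/setting/preset/one-shot/restore
 * sequence runs on that single path.
 */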
1519 static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path)
1520 {
1521 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1522 u8 phy_idx = 0x0;
1523
1524 iqk_info->iqk_times++;
1525
1526 if (path == 0x0)
1527 phy_idx = RTW89_PHY_0;
1528 else
1529 phy_idx = RTW89_PHY_1;
1530
1531 _iqk_get_ch_info(rtwdev, phy_idx, path);
1532 _iqk_macbb_setting(rtwdev, phy_idx, path);
1533 _iqk_preset(rtwdev, path);
1534 _iqk_start_iqk(rtwdev, phy_idx, path);
1535 _iqk_restore(rtwdev, path);
1536 _iqk_afebb_restore(rtwdev, phy_idx, path);
1537 }
1538
1539 static void _iqk_track(struct rtw89_dev *rtwdev)
1540 {
1541 struct rtw89_iqk_info *iqk = &rtwdev->iqk;
1542 u8 path = 0x0;
1543 u8 cur_ther;
1544
1545 if (iqk->iqk_band[0] == RTW89_BAND_2G)
1546 return;
1547 if (iqk->iqk_bw[0] < RTW89_CHANNEL_WIDTH_80)
1548 return;
1549
1550 /* only check path 0 */
1551 for (path = 0; path < 1; path++) {
1552 cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
1553
1554 if (abs(cur_ther - iqk->thermal[path]) > RTW8852A_IQK_THR_REK)
1555 iqk->thermal_rek_en = true;
1556 else
1557 iqk->thermal_rek_en = false;
1558 }
1559 }
1560
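/* RC calibration. The path is forced into RX, RCK is triggered via
 * RR_RCKC, and RF reg 0x1c BIT(3) is polled (up to 20us) for
 * completion before the resulting CA code is written back and the
 * RCK ADC offset is set.
 */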
1561 static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
1562 {
1563 u32 rf_reg5, rck_val = 0;
1564 u32 val;
1565 int ret;
1566
1567 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);
1568
1569 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
1570
1571 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1572 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
1573
1574 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
1575 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
1576
1577 /* RCK trigger */
1578 rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);
1579
1580 ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
1581 false, rtwdev, path, 0x1c, BIT(3));
1582 if (ret)
1583 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");
1584
1585 rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
1586 rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
1587
1588 /* RCK_ADC_OFFSET */
1589 rtw89_write_rf(rtwdev, path, RR_RCKO, RR_RCKO_OFF, 0x4);
1590
1591 rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x1);
1592 rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x0);
1593
1594 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
1595
1596 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1597 "[RCK] RF 0x1b / 0x1c / 0x1d = 0x%x / 0x%x / 0x%x\n",
1598 rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
1599 rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK),
1600 rtw89_read_rf(rtwdev, path, RR_RCKO, RFREG_MASK));
1601 }
1602
1603 static void _iqk_init(struct rtw89_dev *rtwdev)
1604 {
1605 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1606 u8 ch, path;
1607
1608 rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
1609 if (iqk_info->is_iqk_init)
1610 return;
1611
1612 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1613 iqk_info->is_iqk_init = true;
1614 iqk_info->is_nbiqk = false;
1615 iqk_info->iqk_fft_en = false;
1616 iqk_info->iqk_sram_en = false;
1617 iqk_info->iqk_cfir_en = false;
1618 iqk_info->iqk_xym_en = false;
1619 iqk_info->thermal_rek_en = false;
1620 iqk_info->iqk_times = 0x0;
1621
1622 for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
1623 iqk_info->iqk_channel[ch] = 0x0;
1624 for (path = 0; path < RTW8852A_IQK_SS; path++) {
1625 iqk_info->lok_cor_fail[ch][path] = false;
1626 iqk_info->lok_fin_fail[ch][path] = false;
1627 iqk_info->iqk_tx_fail[ch][path] = false;
1628 iqk_info->iqk_rx_fail[ch][path] = false;
1629 iqk_info->iqk_mcc_ch[ch][path] = 0x0;
1630 iqk_info->iqk_table_idx[path] = 0x0;
1631 }
1632 }
1633 }
1634
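/* One full single-path IQK: notify BTC, back up the BB/RF registers,
 * run the MAC/BB setting, preset and one-shot sequence, then restore
 * everything in reverse order so normal operation is preserved.
 */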
1635 static void _doiqk(struct rtw89_dev *rtwdev, bool force,
1636 enum rtw89_phy_idx phy_idx, u8 path)
1637 {
1638 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1639 u32 backup_bb_val[BACKUP_BB_REGS_NR];
1640 u32 backup_rf_val[RTW8852A_IQK_SS][BACKUP_RF_REGS_NR];
1641 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
1642
1643 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
1644
1645 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1646 "[IQK]==========IQK strat!!!!!==========\n");
1647 iqk_info->iqk_times++;
1648 iqk_info->kcount = 0;
1649 iqk_info->version = RTW8852A_IQK_VER;
1650
1651 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
1652 _iqk_get_ch_info(rtwdev, phy_idx, path);
1653 _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
1654 _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
1655 _iqk_macbb_setting(rtwdev, phy_idx, path);
1656 _iqk_preset(rtwdev, path);
1657 _iqk_start_iqk(rtwdev, phy_idx, path);
1658 _iqk_restore(rtwdev, path);
1659 _iqk_afebb_restore(rtwdev, phy_idx, path);
1660 _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
1661 _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
1662 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
1663 }
1664
1665 static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
1666 {
1667 switch (_kpath(rtwdev, phy_idx)) {
1668 case RF_A:
1669 _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
1670 break;
1671 case RF_B:
1672 _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
1673 break;
1674 case RF_AB:
1675 _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
1676 _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
1677 break;
1678 default:
1679 break;
1680 }
1681 }
1682
1683 #define RXDCK_VER_8852A 0xe
1684
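/* Issue one RX DC calibration. When done by AFE, the ADC clock and
 * manual RX DC averaging are forced on first. The DCK itself is
 * triggered by the 0 -> 1 transition on RR_DCK_LV and is given 600us
 * to settle before the level is dropped again.
 */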
1685 static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1686 enum rtw89_rf_path path, bool is_afe)
1687 {
1688 u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path);
1689 u32 ori_val;
1690
1691 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1692 "[RX_DCK] ==== S%d RX DCK (by %s)====\n",
1693 path, is_afe ? "AFE" : "RFC");
1694
1695 ori_val = rtw89_phy_read32_mask(rtwdev, R_P0_RXCK + (path << 13), MASKDWORD);
1696
1697 if (is_afe) {
1698 rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
1699 rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
1700 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
1701 B_P0_RXCK_VAL, 0x3);
1702 rtw89_phy_write32_set(rtwdev, R_S0_RXDC2 + (path << 13), B_S0_RXDC2_MEN);
1703 rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2 + (path << 13),
1704 B_S0_RXDC2_AVG, 0x3);
1705 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
1706 rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK);
1707 rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST);
1708 rtw89_phy_write32_set(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST);
1709 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_CRXBB, 0x1);
1710 }
1711
1712 rtw89_write_rf(rtwdev, path, RR_DCK2, RR_DCK2_CYCLE, 0x3f);
1713 rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_SEL, is_afe);
1714
1715 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_START);
1716
1717 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
1718 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
1719
1720 fsleep(600);
1721
1722 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_STOP);
1723
1724 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
1725
1726 if (is_afe) {
1727 rtw89_phy_write32_clr(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
1728 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
1729 MASKDWORD, ori_val);
1730 }
1731 }
1732
1733 static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1734 bool is_afe)
1735 {
1736 u8 path, kpath, dck_tune;
1737 u32 rf_reg5;
1738 u32 addr;
1739
1740 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1741 "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n",
1742 RXDCK_VER_8852A, rtwdev->hal.cv);
1743
1744 kpath = _kpath(rtwdev, phy);
1745
1746 for (path = 0; path < 2; path++) {
1747 if (!(kpath & BIT(path)))
1748 continue;
1749
1750 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
1751 dck_tune = (u8)rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);
1752
1753 if (rtwdev->is_tssi_mode[path]) {
1754 addr = 0x5818 + (path << 13);
1755 /* TSSI pause */
1756 rtw89_phy_write32_set(rtwdev, addr, BIT(30));
1757 }
1758
1759 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1760 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
1761 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
1762 _set_rx_dck(rtwdev, phy, path, is_afe);
1763 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
1764 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
1765
1766 if (rtwdev->is_tssi_mode[path]) {
1767 addr = 0x5818 + (path << 13);
1768 /* TSSI resume */
1769 rtw89_phy_write32_clr(rtwdev, addr, BIT(30));
1770 }
1771 }
1772 }
1773
1774 #define RTW8852A_RF_REL_VERSION 34
1775 #define RTW8852A_DPK_VER 0x10
1776 #define RTW8852A_DPK_TH_AVG_NUM 4
1777 #define RTW8852A_DPK_RF_PATH 2
1778 #define RTW8852A_DPK_KIP_REG_NUM 2
1779
1780 enum rtw8852a_dpk_id {
1781 LBK_RXIQK = 0x06,
1782 SYNC = 0x10,
1783 MDPK_IDL = 0x11,
1784 MDPK_MPA = 0x12,
1785 GAIN_LOSS = 0x13,
1786 GAIN_CAL = 0x14,
1787 };
1788
1789 static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
1790 enum rtw89_rf_path path, bool is_bybb)
1791 {
1792 if (is_bybb)
1793 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
1794 else
1795 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1796 }
1797
1798 static void _dpk_onoff(struct rtw89_dev *rtwdev,
1799 enum rtw89_rf_path path, bool off);
1800
1801 static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, u32 *reg,
1802 u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM],
1803 u8 path)
1804 {
1805 u8 i;
1806
1807 for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) {
1808 reg_bkup[path][i] = rtw89_phy_read32_mask(rtwdev,
1809 reg[i] + (path << 8),
1810 MASKDWORD);
1811 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
1812 reg[i] + (path << 8), reg_bkup[path][i]);
1813 }
1814 }
1815
1816 static void _dpk_reload_kip(struct rtw89_dev *rtwdev, u32 *reg,
1817 u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM], u8 path)
1818 {
1819 u8 i;
1820
1821 for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) {
1822 rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8),
1823 MASKDWORD, reg_bkup[path][i]);
1824 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
1825 reg[i] + (path << 8), reg_bkup[path][i]);
1826 }
1827 }
1828
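/* Kick a single DPK one-shot command. The command word carries the
 * action id in its high byte and a path-dependent code in its low
 * byte; register 0xbff8 reading back 0x55 appears to signal
 * completion. Returns 1 on a 20ms timeout, 0 on success.
 */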
1829 static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1830 enum rtw89_rf_path path, enum rtw8852a_dpk_id id)
1831 {
1832 u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path);
1833 u16 dpk_cmd = 0x0;
1834 u32 val;
1835 int ret;
1836
1837 dpk_cmd = (u16)((id << 8) | (0x19 + (path << 4)));
1838
1839 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_START);
1840
1841 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
1842 rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN);
1843
1844 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
1845 10, 20000, false, rtwdev, 0xbff8, MASKBYTE0);
1846
1847 rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
1848
1849 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_STOP);
1850
1851 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1852 "[DPK] one-shot for %s = 0x%x (ret=%d)\n",
1853 id == 0x06 ? "LBK_RXIQK" :
1854 id == 0x10 ? "SYNC" :
1855 id == 0x11 ? "MDPK_IDL" :
1856 id == 0x12 ? "MDPK_MPA" :
1857 id == 0x13 ? "GAIN_LOSS" : "PWR_CAL",
1858 dpk_cmd, ret);
1859
1860 if (ret) {
1861 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1862 "[DPK] one-shot over 20ms!!!!\n");
1863 return 1;
1864 }
1865
1866 return 0;
1867 }
1868
1869 static void _dpk_rx_dck(struct rtw89_dev *rtwdev,
1870 enum rtw89_phy_idx phy,
1871 enum rtw89_rf_path path)
1872 {
1873 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
1874 _set_rx_dck(rtwdev, phy, path, false);
1875 }
1876
1877 static void _dpk_information(struct rtw89_dev *rtwdev,
1878 enum rtw89_phy_idx phy,
1879 enum rtw89_rf_path path)
1880 {
1881 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
1882 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
1883 u8 kidx = dpk->cur_idx[path];
1884
1885 dpk->bp[path][kidx].band = chan->band_type;
1886 dpk->bp[path][kidx].ch = chan->channel;
1887 dpk->bp[path][kidx].bw = chan->band_width;
1888
1889 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1890 "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
1891 path, dpk->cur_idx[path], phy,
1892 rtwdev->is_tssi_mode[path] ? "on" : "off",
1893 rtwdev->dbcc_en ? "on" : "off",
1894 dpk->bp[path][kidx].band == 0 ? "2G" :
1895 dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
1896 dpk->bp[path][kidx].ch,
1897 dpk->bp[path][kidx].bw == 0 ? "20M" :
1898 dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
1899 }
1900
1901 static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
1902 enum rtw89_phy_idx phy,
1903 enum rtw89_rf_path path, u8 kpath)
1904 {
1905 switch (kpath) {
1906 case RF_A:
1907 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_a_tbl);
1908
1909 if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x0)
1910 rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);
1911
1912 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_a_tbl);
1913 break;
1914 case RF_B:
1915 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_b_tbl);
1916
1917 if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x1)
1918 rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);
1919
1920 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_b_tbl);
1921 break;
1922 case RF_AB:
1923 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_s_defs_ab_tbl);
1924 break;
1925 default:
1926 break;
1927 }
1928 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1929 "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
1930 }
1931
1932 static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
1933 enum rtw89_phy_idx phy,
1934 enum rtw89_rf_path path, u8 kpath)
1935 {
1936 switch (kpath) {
1937 case RF_A:
1938 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_a_tbl);
1939 break;
1940 case RF_B:
1941 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_b_tbl);
1942 break;
1943 case RF_AB:
1944 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_ab_tbl);
1945 break;
1946 default:
1947 break;
1948 }
1949 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1950 "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
1951 }
1952
1953 static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
1954 enum rtw89_rf_path path, bool is_pause)
1955 {
1956 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
1957 B_P0_TSSI_TRK_EN, is_pause);
1958
1959 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
1960 is_pause ? "pause" : "resume");
1961 }
1962
1963 static void _dpk_kip_setting(struct rtw89_dev *rtwdev,
1964 enum rtw89_rf_path path, u8 kidx)
1965 {
1966 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
1967 rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x00093f3f);
1968 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
1969 rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
1970 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG, B_DPK_CFG_IDX, 0x2);
1971 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path); /*subpage_id*/
1972 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8) + (kidx << 2),
1973 MASKDWORD, 0x003f2e2e);
1974 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
1975 MASKDWORD, 0x005b5b5b);
1976
1977 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP setting for S%d[%d]!!\n",
1978 path, kidx);
1979 }
1980
1981 static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
1982 enum rtw89_rf_path path)
1983 {
1984 rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD);
1985 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
1986 rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000);
1987 rtw89_phy_write32_clr(rtwdev, R_KIP_CLK, MASKDWORD);
1988
1989 if (rtwdev->hal.cv > CHIP_CBV)
1990 rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), BIT(15), 0x1);
1991
1992 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
1993 }
1994
1995 static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
1996 enum rtw89_phy_idx phy,
1997 enum rtw89_rf_path path)
1998 {
1999 u8 cur_rxbb;
2000
2001 cur_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
2002
2003 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_f_tbl);
2004
2005 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
2006 rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);
2007 rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x2);
2008 rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK,
2009 rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK));
2010 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
2011 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
2012 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1);
2013
2014 fsleep(70);
2015
2016 rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTL, 0x1f);
2017
2018 if (cur_rxbb <= 0xa)
2019 rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x3);
2020 else if (cur_rxbb <= 0x10 && cur_rxbb >= 0xb)
2021 rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x1);
2022 else
2023 rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x0);
2024
2025 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
2026
2027 _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
2028
2029 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
2030 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));
2031
2032 rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
2033 rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x0);
2034 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); /*POW IQKPLL*/
2035 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_DPK);
2036
2037 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_r_tbl);
2038 }
2039
2040 static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx,
2041 enum rtw89_rf_path path)
2042 {
2043 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2044
2045 dpk->bp[path][kidx].ther_dpk =
2046 ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
2047
2048 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
2049 dpk->bp[path][kidx].ther_dpk);
2050 }
2051
2052 static u8 _dpk_set_tx_pwr(struct rtw89_dev *rtwdev, u8 gain,
2053 enum rtw89_rf_path path)
2054 {
2055 u8 txagc_ori = 0x38;
2056
2057 rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc_ori);
2058
2059 return txagc_ori;
2060 }
2061
2062 static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
2063 enum rtw89_rf_path path, u8 kidx)
2064 {
2065 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2066
2067 if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
2068 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x280b);
2069 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x0);
2070 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4);
2071 rtw89_write_rf(rtwdev, path, RR_MIXER, RR_MIXER_GN, 0x0);
2072 } else {
2073 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x282e);
2074 rtw89_write_rf(rtwdev, path, RR_BIASA2, RR_BIASA2_LB, 0x7);
2075 rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW, 0x3);
2076 rtw89_write_rf(rtwdev, path, RR_RXA, RR_RXA_DPK, 0x3);
2077 }
2078 rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
2079 rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
2080 rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);
2081
2082 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2083 "[DPK] RF 0x0/0x1/0x1a = 0x%x/ 0x%x/ 0x%x\n",
2084 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
2085 rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK),
2086 rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK));
2087 }
2088
2089 static void _dpk_manual_txcfir(struct rtw89_dev *rtwdev,
2090 enum rtw89_rf_path path, bool is_manual)
2091 {
2092 u8 tmp_pad, tmp_txbb;
2093
2094 if (is_manual) {
2095 rtw89_phy_write32_mask(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN, 0x1);
2096 tmp_pad = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_PAD);
2097 rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8),
2098 B_RFGAIN_PAD, tmp_pad);
2099
2100 tmp_txbb = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_BB);
2101 rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8),
2102 B_RFGAIN_TXBB, tmp_txbb);
2103
2104 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8),
2105 B_LOAD_COEF_CFIR, 0x1);
2106 rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8),
2107 B_LOAD_COEF_CFIR);
2108
2109 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), BIT(1), 0x1);
2110
2111 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2112 "[DPK] PAD_man / TXBB_man = 0x%x / 0x%x\n", tmp_pad,
2113 tmp_txbb);
2114 } else {
2115 rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN);
2116 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2117 "[DPK] disable manual switch TXCFIR\n");
2118 }
2119 }
2120
2121 static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
2122 enum rtw89_rf_path path, bool is_bypass)
2123 {
2124 if (is_bypass) {
2125 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
2126 B_RXIQC_BYPASS2, 0x1);
2127 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
2128 B_RXIQC_BYPASS, 0x1);
2129 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2130 "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path,
2131 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
2132 MASKDWORD));
2133 } else {
2134 rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2);
2135 rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS);
2136 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2137 "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path,
2138 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
2139 MASKDWORD));
2140 }
2141 }
2142
2143 static
2144 void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
2145 {
2146 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2147
2148 if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
2149 rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F);
2150 else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
2151 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
2152 else
2153 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
2154
2155 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
2156 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
2157 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
2158 }
2159
2160 static void _dpk_table_select(struct rtw89_dev *rtwdev,
2161 enum rtw89_rf_path path, u8 kidx, u8 gain)
2162 {
2163 u8 val;
2164
2165 val = 0x80 + kidx * 0x20 + gain * 0x10;
2166 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
2167 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2168 "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
2169 gain, val);
2170 }
2171
2172 static bool _dpk_sync_check(struct rtw89_dev *rtwdev,
2173 enum rtw89_rf_path path)
2174 {
2175 #define DPK_SYNC_TH_DC_I 200
2176 #define DPK_SYNC_TH_DC_Q 200
2177 #define DPK_SYNC_TH_CORR 170
2178 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2179 u16 dc_i, dc_q;
2180 u8 corr_val, corr_idx;
2181
2182 rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
2183
2184 corr_idx = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
2185 corr_val = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);
2186
2187 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2188 "[DPK] S%d Corr_idx / Corr_val = %d / %d\n", path, corr_idx,
2189 corr_val);
2190
2191 dpk->corr_idx[path][0] = corr_idx;
2192 dpk->corr_val[path][0] = corr_val;
2193
2194 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
2195
2196 dc_i = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
2197 dc_q = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
2198
2199 dc_i = abs(sign_extend32(dc_i, 11));
2200 dc_q = abs(sign_extend32(dc_q, 11));
2201
2202 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q = %d / %d\n",
2203 path, dc_i, dc_q);
2204
2205 dpk->dc_i[path][0] = dc_i;
2206 dpk->dc_q[path][0] = dc_q;
2207
2208 if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
2209 corr_val < DPK_SYNC_TH_CORR)
2210 return true;
2211 else
2212 return false;
2213 }
2214
2215 static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2216 enum rtw89_rf_path path, u8 kidx)
2217 {
2218 _dpk_tpg_sel(rtwdev, path, kidx);
2219 _dpk_one_shot(rtwdev, phy, path, SYNC);
2220 return _dpk_sync_check(rtwdev, path); /*1= fail*/
2221 }
2222
2223 static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
2224 {
2225 u16 dgain = 0x0;
2226
2227 rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
2228
2229 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
2230
2231 dgain = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
2232
2233 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain,
2234 dgain);
2235
2236 return dgain;
2237 }
2238
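/* Map the measured digital gain onto a coarse RXBB correction in
 * steps of three: positive when dgain sits above the 0x3c4-0x550
 * window and negative when it falls below it.
 */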
2239 static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
2240 {
2241 s8 offset;
2242
2243 if (dgain >= 0x783)
2244 offset = 0x6;
2245 else if (dgain <= 0x782 && dgain >= 0x551)
2246 offset = 0x3;
2247 else if (dgain <= 0x550 && dgain >= 0x3c4)
2248 offset = 0x0;
2249 else if (dgain <= 0x3c3 && dgain >= 0x2aa)
2250 offset = -3;
2251 else if (dgain <= 0x2a9 && dgain >= 0x1e3)
2252 offset = -6;
2253 else if (dgain <= 0x1e2 && dgain >= 0x156)
2254 offset = -9;
2255 else if (dgain <= 0x155)
2256 offset = -12;
2257 else
2258 offset = 0x0;
2259
2260 return offset;
2261 }
2262
2263 static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
2264 {
2265 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
2266 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
2267 return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
2268 }
2269
2270 static void _dpk_gainloss(struct rtw89_dev *rtwdev,
2271 enum rtw89_phy_idx phy, enum rtw89_rf_path path,
2272 u8 kidx)
2273 {
2274 _dpk_table_select(rtwdev, path, kidx, 1);
2275 _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
2276 }
2277
2278 #define DPK_TXAGC_LOWER 0x2e
2279 #define DPK_TXAGC_UPPER 0x3f
2280 #define DPK_TXAGC_INVAL 0xff
2281
2282 static u8 _dpk_set_offset(struct rtw89_dev *rtwdev,
2283 enum rtw89_rf_path path, s8 gain_offset)
2284 {
2285 u8 txagc;
2286
2287 txagc = (u8)rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK);
2288
2289 if (txagc - gain_offset < DPK_TXAGC_LOWER)
2290 txagc = DPK_TXAGC_LOWER;
2291 else if (txagc - gain_offset > DPK_TXAGC_UPPER)
2292 txagc = DPK_TXAGC_UPPER;
2293 else
2294 txagc = txagc - gain_offset;
2295
2296 rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc);
2297
2298 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
2299 gain_offset, txagc);
2300 return txagc;
2301 }
2302
2303 enum dpk_agc_step {
2304 DPK_AGC_STEP_SYNC_DGAIN,
2305 DPK_AGC_STEP_GAIN_ADJ,
2306 DPK_AGC_STEP_GAIN_LOSS_IDX,
2307 DPK_AGC_STEP_GL_GT_CRITERION,
2308 DPK_AGC_STEP_GL_LT_CRITERION,
2309 DPK_AGC_STEP_SET_TX_GAIN,
2310 };
2311
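/* Read back the PA scan report. In check mode the magnitudes of the
 * first (0x00) and last (0x1f) samples are compared, and 1 is
 * returned when the first is at least 8/5 of the last; the AGC takes
 * that as a gain-loss failure. Without is_check all 32 samples are
 * only dumped for debug (the trailing comparison then runs on zeroed
 * values, and no caller uses that result).
 */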
2312 static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
2313 {
2314 u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
2315 u8 i;
2316
2317 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_pas_read_defs_tbl);
2318
2319 if (is_check) {
2320 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
2321 val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
2322 val1_i = abs(sign_extend32(val1_i, 11));
2323 val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
2324 val1_q = abs(sign_extend32(val1_q, 11));
2325 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
2326 val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
2327 val2_i = abs(sign_extend32(val2_i, 11));
2328 val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
2329 val2_q = abs(sign_extend32(val2_q, 11));
2330
2331 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
2332 phy_div(val1_i * val1_i + val1_q * val1_q,
2333 val2_i * val2_i + val2_q * val2_q));
2334
2335 } else {
2336 for (i = 0; i < 32; i++) {
2337 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
2338 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2339 "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
2340 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
2341 }
2342 }
2343 if ((val1_i * val1_i + val1_q * val1_q) >=
2344 ((val2_i * val2_i + val2_q * val2_q) * 8 / 5))
2345 return 1;
2346 else
2347 return 0;
2348 }
2349
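/* TX AGC state machine for DPK. Starting from init_txagc it cycles
 * through sync/dgain measurement, RXBB adjustment and gain-loss
 * estimation, lowering TXAGC by 3 when the gain loss exceeds
 * DPK_GL_CRIT and raising it by 2 when it reads zero, for at most
 * DPK_AGC_ADJ_LMT rounds. Returns the final TXAGC, or
 * DPK_TXAGC_INVAL if sync failed.
 */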
2350 static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2351 enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
2352 bool loss_only)
2353 {
2354 #define DPK_AGC_ADJ_LMT 6
2355 #define DPK_DGAIN_UPPER 1922
2356 #define DPK_DGAIN_LOWER 342
2357 #define DPK_RXBB_UPPER 0x1f
2358 #define DPK_RXBB_LOWER 0
2359 #define DPK_GL_CRIT 7
2360 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2361 u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
2362 u8 agc_cnt = 0;
2363 bool limited_rxbb = false;
2364 s8 offset = 0;
2365 u16 dgain = 0;
2366 u8 step = DPK_AGC_STEP_SYNC_DGAIN;
2367 bool goout = false;
2368
2369 tmp_txagc = init_txagc;
2370
2371 do {
2372 switch (step) {
2373 case DPK_AGC_STEP_SYNC_DGAIN:
2374 if (_dpk_sync(rtwdev, phy, path, kidx)) {
2375 tmp_txagc = DPK_TXAGC_INVAL;
2376 goout = true;
2377 break;
2378 }
2379
2380 dgain = _dpk_dgain_read(rtwdev);
2381
2382 if (loss_only || limited_rxbb)
2383 step = DPK_AGC_STEP_GAIN_LOSS_IDX;
2384 else
2385 step = DPK_AGC_STEP_GAIN_ADJ;
2386 break;
2387
2388 case DPK_AGC_STEP_GAIN_ADJ:
2389 tmp_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
2390 offset = _dpk_dgain_mapping(rtwdev, dgain);
2391
2392 if (tmp_rxbb + offset > DPK_RXBB_UPPER) {
2393 tmp_rxbb = DPK_RXBB_UPPER;
2394 limited_rxbb = true;
2395 } else if (tmp_rxbb + offset < DPK_RXBB_LOWER) {
2396 tmp_rxbb = DPK_RXBB_LOWER;
2397 limited_rxbb = true;
2398 } else {
2399 tmp_rxbb = tmp_rxbb + offset;
2400 }
2401
2402 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb);
2403 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2404 "[DPK] Adjust RXBB (%d) = 0x%x\n", offset,
2405 tmp_rxbb);
2406 if (offset != 0 || agc_cnt == 0) {
2407 if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
2408 _dpk_bypass_rxcfir(rtwdev, path, true);
2409 else
2410 _dpk_lbk_rxiqk(rtwdev, phy, path);
2411 }
2412 if (dgain > DPK_DGAIN_UPPER || dgain < DPK_DGAIN_LOWER)
2413 step = DPK_AGC_STEP_SYNC_DGAIN;
2414 else
2415 step = DPK_AGC_STEP_GAIN_LOSS_IDX;
2416
2417 agc_cnt++;
2418 break;
2419
2420 case DPK_AGC_STEP_GAIN_LOSS_IDX:
2421 _dpk_gainloss(rtwdev, phy, path, kidx);
2422 tmp_gl_idx = _dpk_gainloss_read(rtwdev);
2423
2424 if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
2425 tmp_gl_idx > DPK_GL_CRIT)
2426 step = DPK_AGC_STEP_GL_GT_CRITERION;
2427 else if (tmp_gl_idx == 0)
2428 step = DPK_AGC_STEP_GL_LT_CRITERION;
2429 else
2430 step = DPK_AGC_STEP_SET_TX_GAIN;
2431 break;
2432
2433 case DPK_AGC_STEP_GL_GT_CRITERION:
2434 if (tmp_txagc == DPK_TXAGC_LOWER) {
2435 goout = true;
2436 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2437 "[DPK] Txagc@lower bound!!\n");
2438 } else {
2439 tmp_txagc = _dpk_set_offset(rtwdev, path, 3);
2440 }
2441 step = DPK_AGC_STEP_GAIN_LOSS_IDX;
2442 agc_cnt++;
2443 break;
2444
2445 case DPK_AGC_STEP_GL_LT_CRITERION:
2446 if (tmp_txagc == DPK_TXAGC_UPPER) {
2447 goout = true;
2448 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2449 "[DPK] Txagc@upper bound!!\n");
2450 } else {
2451 tmp_txagc = _dpk_set_offset(rtwdev, path, -2);
2452 }
2453 step = DPK_AGC_STEP_GAIN_LOSS_IDX;
2454 agc_cnt++;
2455 break;
2456
2457 case DPK_AGC_STEP_SET_TX_GAIN:
2458 tmp_txagc = _dpk_set_offset(rtwdev, path, tmp_gl_idx);
2459 goout = true;
2460 agc_cnt++;
2461 break;
2462
2463 default:
2464 goout = true;
2465 break;
2466 }
2467 } while (!goout && (agc_cnt < DPK_AGC_ADJ_LMT));
2468
2469 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2470 "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc,
2471 tmp_rxbb);
2472
2473 return tmp_txagc;
2474 }
2475
2476 static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
2477 {
2478 switch (order) {
2479 case 0:
2480 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2481 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
2482 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
2483 break;
2484 case 1:
2485 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2486 rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
2487 rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
2488 break;
2489 case 2:
2490 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2491 rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
2492 rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
2493 break;
2494 default:
2495 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2496 "[DPK] Wrong MDPD order!!(0x%x)\n", order);
2497 break;
2498 }
2499
2500 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2501 "[DPK] Set MDPD order to 0x%x for IDL\n", order);
2502 }
2503
2504 static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2505 enum rtw89_rf_path path, u8 kidx, u8 gain)
2506 {
2507 _dpk_set_mdpd_para(rtwdev, 0x0);
2508 _dpk_table_select(rtwdev, path, kidx, 1);
2509 _dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
2510 }
2511
2512 static void _dpk_fill_result(struct rtw89_dev *rtwdev,
2513 enum rtw89_rf_path path, u8 kidx, u8 gain,
2514 u8 txagc)
2515 {
2516 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2517
2518 u16 pwsf = 0x78;
2519 u8 gs = 0x5b;
2520
2521 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx);
2522
2523 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2524 "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc,
2525 pwsf, gs);
2526
2527 dpk->bp[path][kidx].txagc_dpk = txagc;
2528 rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
2529 0x3F << ((gain << 3) + (kidx << 4)), txagc);
2530
2531 dpk->bp[path][kidx].pwsf = pwsf;
2532 rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
2533 0x1FF << (gain << 4), pwsf);
2534
2535 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
2536 rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD);
2537
2538 dpk->bp[path][kidx].gs = gs;
2539 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
2540 MASKDWORD, 0x065b5b5b);
2541
2542 rtw89_phy_write32_clr(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD);
2543
2544 rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL);
2545 }
2546
2547 static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2548 enum rtw89_rf_path path)
2549 {
2550 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2551 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2552 bool is_reload = false;
2553 u8 idx, cur_band, cur_ch;
2554
2555 cur_band = chan->band_type;
2556 cur_ch = chan->channel;
2557
2558 for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
2559 if (cur_band != dpk->bp[path][idx].band ||
2560 cur_ch != dpk->bp[path][idx].ch)
2561 continue;
2562
2563 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
2564 B_COEF_SEL_MDPD, idx);
2565 dpk->cur_idx[path] = idx;
2566 is_reload = true;
2567 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2568 "[DPK] reload S%d[%d] success\n", path, idx);
2569 }
2570
2571 return is_reload;
2572 }
2573
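/* Run DPK on one path: set TX power and the RF/KIP state, let the
 * AGC search for a workable TXAGC, record the thermal reading, then
 * perform the IDL model extraction and commit the result. Returns
 * true on failure (the AGC could not sync).
 */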
2574 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2575 enum rtw89_rf_path path, u8 gain)
2576 {
2577 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2578 u8 txagc = 0, kidx = dpk->cur_idx[path];
2579 bool is_fail = false;
2580
2581 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2582 "[DPK] ========= S%d[%d] DPK Start =========\n", path,
2583 kidx);
2584
2585 _rf_direct_cntrl(rtwdev, path, false);
2586 txagc = _dpk_set_tx_pwr(rtwdev, gain, path);
2587 _dpk_rf_setting(rtwdev, gain, path, kidx);
2588 _dpk_rx_dck(rtwdev, phy, path);
2589
2590 _dpk_kip_setting(rtwdev, path, kidx);
2591 _dpk_manual_txcfir(rtwdev, path, true);
2592 txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
2593 if (txagc == DPK_TXAGC_INVAL)
2594 is_fail = true;
2595 _dpk_get_thermal(rtwdev, kidx, path);
2596
2597 _dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
2598 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
2599 _dpk_fill_result(rtwdev, path, kidx, gain, txagc);
2600 _dpk_manual_txcfir(rtwdev, path, false);
2601
2602 if (!is_fail)
2603 dpk->bp[path][kidx].path_ok = true;
2604 else
2605 dpk->bp[path][kidx].path_ok = false;
2606
2607 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
2608 is_fail ? "Check" : "Success");
2609
2610 return is_fail;
2611 }
2612
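/* Top-level DPK scheduler. With reload enabled, paths whose current
 * band/channel already have a stored result are simply reloaded; the
 * remaining paths get a fresh calibration wrapped in BB/RF/KIP
 * backup and restore, with TSSI paused where it is active.
 */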
2613 static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
2614 enum rtw89_phy_idx phy, u8 kpath)
2615 {
2616 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2617 u32 backup_bb_val[BACKUP_BB_REGS_NR];
2618 u32 backup_rf_val[RTW8852A_DPK_RF_PATH][BACKUP_RF_REGS_NR];
2619 u32 kip_bkup[RTW8852A_DPK_RF_PATH][RTW8852A_DPK_KIP_REG_NUM] = {{0}};
2620 u32 kip_reg[] = {R_RXIQC, R_IQK_RES};
2621 u8 path;
2622 bool is_fail = true, reloaded[RTW8852A_DPK_RF_PATH] = {false};
2623
2624 if (dpk->is_dpk_reload_en) {
2625 for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
2626 if (!(kpath & BIT(path)))
2627 continue;
2628
2629 reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
2630 if (!reloaded[path] && dpk->bp[path][0].ch != 0)
2631 dpk->cur_idx[path] = !dpk->cur_idx[path];
2632 else
2633 _dpk_onoff(rtwdev, path, false);
2634 }
2635 } else {
2636 for (path = 0; path < RTW8852A_DPK_RF_PATH; path++)
2637 dpk->cur_idx[path] = 0;
2638 }
2639
2640 if ((kpath == RF_A && reloaded[RF_PATH_A]) ||
2641 (kpath == RF_B && reloaded[RF_PATH_B]) ||
2642 (kpath == RF_AB && reloaded[RF_PATH_A] && reloaded[RF_PATH_B]))
2643 return;
2644
2645 _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
2646
2647 for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
2648 if (!(kpath & BIT(path)) || reloaded[path])
2649 continue;
2650 if (rtwdev->is_tssi_mode[path])
2651 _dpk_tssi_pause(rtwdev, path, true);
2652 _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
2653 _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
2654 _dpk_information(rtwdev, phy, path);
2655 }
2656
2657 _dpk_bb_afe_setting(rtwdev, phy, path, kpath);
2658
2659 for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
2660 if (!(kpath & BIT(path)) || reloaded[path])
2661 continue;
2662
2663 is_fail = _dpk_main(rtwdev, phy, path, 1);
2664 _dpk_onoff(rtwdev, path, is_fail);
2665 }
2666
2667 _dpk_bb_afe_restore(rtwdev, phy, path, kpath);
2668 _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
2669
2670 for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
2671 if (!(kpath & BIT(path)) || reloaded[path])
2672 continue;
2673
2674 _dpk_kip_restore(rtwdev, path);
2675 _dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
2676 _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
2677 if (rtwdev->is_tssi_mode[path])
2678 _dpk_tssi_pause(rtwdev, path, false);
2679 }
2680 }
2681
2682 static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2683 {
2684 struct rtw89_fem_info *fem = &rtwdev->fem;
2685 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2686
2687 if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
2688 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2689 "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
2690 return true;
2691 } else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
2692 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2693 "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
2694 return true;
2695 }
2696
2697 return false;
2698 }
2699
2700 static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2701 {
2702 u8 path, kpath;
2703
2704 kpath = _kpath(rtwdev, phy);
2705
2706 for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
2707 if (kpath & BIT(path))
2708 _dpk_onoff(rtwdev, path, true);
2709 }
2710 }
2711
2712 static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
2713 {
2714 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2715 "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
2716 RTW8852A_DPK_VER, rtwdev->hal.cv,
2717 RTW8852A_RF_REL_VERSION);
2718
2719 if (_dpk_bypass_check(rtwdev, phy))
2720 _dpk_force_bypass(rtwdev, phy);
2721 else
2722 _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
2723 }
2724
2725 static void _dpk_onoff(struct rtw89_dev *rtwdev,
2726 enum rtw89_rf_path path, bool off)
2727 {
2728 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2729 u8 val, kidx = dpk->cur_idx[path];
2730
2731 val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;
2732
2733 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
2734 MASKBYTE3, 0x6 | val);
2735
2736 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
2737 kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
2738 }
2739
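/* DPK thermal tracking: derive a per-path power scaling factor
 * (pwsf) correction from the thermal delta since calibration (scaled
 * by 3/2 on 2GHz and 5/2 otherwise) plus, in TSSI mode, the current
 * TX AGC offsets, and write it to the DPD boundary registers unless
 * tracking is disabled through R_DPK_TRK.
 */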
2740 static void _dpk_track(struct rtw89_dev *rtwdev)
2741 {
2742 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2743 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
2744 u8 path, kidx;
2745 u8 trk_idx = 0, txagc_rf = 0;
2746 s8 txagc_bb = 0, txagc_bb_tp = 0, ini_diff = 0, txagc_ofst = 0;
2747 u16 pwsf[2];
2748 u8 cur_ther;
2749 s8 delta_ther[2] = {0};
2750
2751 for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
2752 kidx = dpk->cur_idx[path];
2753
2754 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2755 "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
2756 path, kidx, dpk->bp[path][kidx].ch);
2757
2758 cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
2759
2760 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2761 "[DPK_TRK] thermal now = %d\n", cur_ther);
2762
2763 if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0)
2764 delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;
2765
2766 if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
2767 delta_ther[path] = delta_ther[path] * 3 / 2;
2768 else
2769 delta_ther[path] = delta_ther[path] * 5 / 2;
2770
2771 txagc_rf = (u8)rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
2772 RR_MODOPT_M_TXPWR);
2773
2774 if (rtwdev->is_tssi_mode[path]) {
2775 trk_idx = (u8)rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);
2776
2777 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2778 "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
2779 txagc_rf, trk_idx);
2780
2781 txagc_bb =
2782 (s8)rtw89_phy_read32_mask(rtwdev,
2783 R_TXAGC_BB + (path << 13),
2784 MASKBYTE2);
2785 txagc_bb_tp =
2786 (u8)rtw89_phy_read32_mask(rtwdev,
2787 R_TXAGC_TP + (path << 13),
2788 B_TXAGC_TP);
2789
2790 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2791 "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
2792 txagc_bb_tp, txagc_bb);
2793
2794 txagc_ofst =
2795 (s8)rtw89_phy_read32_mask(rtwdev,
2796 R_TXAGC_BB + (path << 13),
2797 MASKBYTE3);
2798
2799 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2800 "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
2801 txagc_ofst, delta_ther[path]);
2802
2803 if (rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
2804 BIT(15)) == 0x1)
2805 txagc_ofst = 0;
2806
2807 if (txagc_rf != 0 && cur_ther != 0)
2808 ini_diff = txagc_ofst + delta_ther[path];
2809
2810 if (rtw89_phy_read32_mask(rtwdev, R_P0_TXDPD + (path << 13),
2811 B_P0_TXDPD) == 0x0) {
2812 pwsf[0] = dpk->bp[path][kidx].pwsf + txagc_bb_tp -
2813 txagc_bb + ini_diff +
2814 tssi_info->extra_ofst[path];
2815 pwsf[1] = dpk->bp[path][kidx].pwsf + txagc_bb_tp -
2816 txagc_bb + ini_diff +
2817 tssi_info->extra_ofst[path];
2818 } else {
2819 pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff +
2820 tssi_info->extra_ofst[path];
2821 pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff +
2822 tssi_info->extra_ofst[path];
2823 }
2824
2825 } else {
2826 pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
2827 pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
2828 }
2829
2830 if (rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS) == 0x0 &&
2831 txagc_rf != 0) {
2832 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2833 "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
2834 pwsf[0], pwsf[1]);
2835
2836 rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
2837 0x000001FF, pwsf[0]);
2838 rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
2839 0x01FF0000, pwsf[1]);
2840 }
2841 }
2842 }
2843
2844 static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2845 enum rtw89_rf_path path)
2846 {
2847 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2848 enum rtw89_band band = chan->band_type;
2849
2850 if (band == RTW89_BAND_2G)
2851 rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
2852 else
2853 rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
2854 }
2855
2856 static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2857 {
2858 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2859 enum rtw89_band band = chan->band_type;
2860
2861 rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl);
2862 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2863 &rtw8852a_tssi_sys_defs_2g_tbl,
2864 &rtw8852a_tssi_sys_defs_5g_tbl);
2865 }
2866
2867 static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2868 enum rtw89_rf_path path)
2869 {
2870 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2871 enum rtw89_band band = chan->band_type;
2872
2873 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2874 &rtw8852a_tssi_txpwr_ctrl_bb_defs_a_tbl,
2875 &rtw8852a_tssi_txpwr_ctrl_bb_defs_b_tbl);
2876 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2877 &rtw8852a_tssi_txpwr_ctrl_bb_defs_2g_tbl,
2878 &rtw8852a_tssi_txpwr_ctrl_bb_defs_5g_tbl);
2879 }
2880
2881 static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
2882 enum rtw89_phy_idx phy,
2883 enum rtw89_rf_path path)
2884 {
2885 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2886 &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl,
2887 &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl);
2888 }
2889
2890 static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2891 enum rtw89_rf_path path)
2892 {
2893 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2894 &rtw8852a_tssi_dck_defs_a_tbl,
2895 &rtw8852a_tssi_dck_defs_b_tbl);
2896 }
2897
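/* Program the 64-entry thermal offset table used by TSSI. Entries
 * 0-31 take the negated down-swing deltas and entries 63..32 the
 * up-swing deltas for the current sub-band; a thermal reading of
 * 0xff (no valid sensor value) zeroes the table and anchors the
 * thermal meter at code 32.
 */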
2898 static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2899 enum rtw89_rf_path path)
2900 {
2901 #define __get_val(ptr, idx) \
2902 ({ \
2903 s8 *__ptr = (ptr); \
2904 u8 __idx = (idx), __i, __v; \
2905 u32 __val = 0; \
2906 for (__i = 0; __i < 4; __i++) { \
2907 __v = (__ptr[__idx + __i]); \
2908 __val |= (__v << (8 * __i)); \
2909 } \
2910 __val; \
2911 })
2912 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
2913 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2914 u8 ch = chan->channel;
2915 u8 subband = chan->subband_type;
2916 const s8 *thm_up_a = NULL;
2917 const s8 *thm_down_a = NULL;
2918 const s8 *thm_up_b = NULL;
2919 const s8 *thm_down_b = NULL;
2920 u8 thermal = 0xff;
2921 s8 thm_ofst[64] = {0};
2922 u32 tmp = 0;
2923 u8 i, j;
2924
2925 switch (subband) {
2926 default:
2927 case RTW89_CH_2G:
2928 thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_p;
2929 thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_n;
2930 thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_p;
2931 thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_n;
2932 break;
2933 case RTW89_CH_5G_BAND_1:
2934 thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[0];
2935 thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[0];
2936 thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[0];
2937 thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[0];
2938 break;
2939 case RTW89_CH_5G_BAND_3:
2940 thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[1];
2941 thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[1];
2942 thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[1];
2943 thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[1];
2944 break;
2945 case RTW89_CH_5G_BAND_4:
2946 thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[2];
2947 thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[2];
2948 thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[2];
2949 thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[2];
2950 break;
2951 }
2952
2953 if (path == RF_PATH_A) {
2954 thermal = tssi_info->thermal[RF_PATH_A];
2955
2956 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2957 "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);
2958
2959 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
2960 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);
2961
2962 if (thermal == 0xff) {
2963 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
2964 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);
2965
2966 for (i = 0; i < 64; i += 4) {
2967 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);
2968
2969 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2970 "[TSSI] write 0x%x val=0x%08x\n",
2971 0x5c00 + i, 0x0);
2972 }
2973
2974 } else {
2975 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
2976 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
2977 thermal);
2978
2979 i = 0;
2980 for (j = 0; j < 32; j++)
2981 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
2982 -thm_down_a[i++] :
2983 -thm_down_a[DELTA_SWINGIDX_SIZE - 1];
2984
2985 i = 1;
2986 for (j = 63; j >= 32; j--)
2987 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
2988 thm_up_a[i++] :
2989 thm_up_a[DELTA_SWINGIDX_SIZE - 1];
2990
2991 for (i = 0; i < 64; i += 4) {
2992 tmp = __get_val(thm_ofst, i);
2993 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);
2994
2995 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2996 "[TSSI] write 0x%x val=0x%08x\n",
2997 0x5c00 + i, tmp);
2998 }
2999 }
3000 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
3001 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
3002
3003 } else {
3004 thermal = tssi_info->thermal[RF_PATH_B];
3005
3006 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3007 "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);
3008
3009 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
3010 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);
3011
3012 if (thermal == 0xff) {
3013 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
3014 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);
3015
3016 for (i = 0; i < 64; i += 4) {
3017 rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);
3018
3019 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3020 "[TSSI] write 0x%x val=0x%08x\n",
3021 0x7c00 + i, 0x0);
3022 }
3023
3024 } else {
3025 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
3026 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
3027 thermal);
3028
3029 i = 0;
3030 for (j = 0; j < 32; j++)
3031 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
3032 -thm_down_b[i++] :
3033 -thm_down_b[DELTA_SWINGIDX_SIZE - 1];
3034
3035 i = 1;
3036 for (j = 63; j >= 32; j--)
3037 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
3038 thm_up_b[i++] :
3039 thm_up_b[DELTA_SWINGIDX_SIZE - 1];
3040
3041 for (i = 0; i < 64; i += 4) {
3042 tmp = __get_val(thm_ofst, i);
3043 rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);
3044
3045 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3046 "[TSSI] write 0x%x val=0x%08x\n",
3047 0x7c00 + i, tmp);
3048 }
3049 }
3050 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
3051 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
3052 }
3053 #undef __get_val
3054 }
3055
static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				   enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_dac_gain_tbl_defs_a_tbl,
				 &rtw8852a_tssi_dac_gain_tbl_defs_b_tbl);
}

static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_slope_cal_org_defs_a_tbl,
				 &rtw8852a_tssi_slope_cal_org_defs_b_tbl);
}

static void _tssi_set_rf_gap_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_rf_gap_tbl_defs_a_tbl,
				 &rtw8852a_tssi_rf_gap_tbl_defs_b_tbl);
}

static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_slope_defs_a_tbl,
				 &rtw8852a_tssi_slope_defs_b_tbl);
}

static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_track_defs_a_tbl,
				 &rtw8852a_tssi_track_defs_b_tbl);
}

static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_txagc_ofst_mv_avg_defs_a_tbl,
				 &rtw8852a_tssi_txagc_ofst_mv_avg_defs_b_tbl);
}

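/* Load the per-path TSSI PAK tuning table that matches the current sub-band. */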
static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 subband = chan->subband_type;

	switch (subband) {
	default:
	case RTW89_CH_2G:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_2g_tbl,
					 &rtw8852a_tssi_pak_defs_b_2g_tbl);
		break;
	case RTW89_CH_5G_BAND_1:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_1_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_1_tbl);
		break;
	case RTW89_CH_5G_BAND_3:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_3_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_3_tbl);
		break;
	case RTW89_CH_5G_BAND_4:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_4_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_4_tbl);
		break;
	}
}

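/*
 * Enable TSSI on all paths: set up tracking and the TX AGC offset moving
 * average, latch the current average thermal as the tracking baseline and
 * flag the paths as running in TSSI mode.
 */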
static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 i;

	for (i = 0; i < RF_PATH_NUM_8852A; i++) {
		_tssi_set_track(rtwdev, phy, i);
		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);

		rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A,
					 &rtw8852a_tssi_enable_defs_a_tbl,
					 &rtw8852a_tssi_enable_defs_b_tbl);

		tssi_info->base_thermal[i] =
			ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]);
		rtwdev->is_tssi_mode[i] = true;
	}
}

static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	rtwdev->is_tssi_mode[RF_PATH_A] = false;
	rtwdev->is_tssi_mode[RF_PATH_B] = false;
}

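/* Map a 2 GHz channel to its CCK TSSI DE group, e.g. channel 6 -> group 2. */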
static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 13:
		return 4;
	case 14:
		return 5;
	}

	return 0;
}

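/*
 * Channels that straddle two OFDM groups are flagged with bit 31 and carry
 * the lower group's index; the DE applied for such channels is the average
 * of that group and the next (see _tssi_get_ofdm_de()).
 */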
#define TSSI_EXTRA_GROUP_BIT (BIT(31))
#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)

static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}

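/* Trim groups are coarser than the DE groups: roughly one per sub-band. */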
static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 8:
		return 0;
	case 9 ... 14:
		return 1;
	case 36 ... 48:
		return 2;
	case 52 ... 64:
		return 3;
	case 100 ... 112:
		return 4;
	case 116 ... 128:
		return 5;
	case 132 ... 144:
		return 6;
	case 149 ... 177:
		return 7;
	}

	return 0;
}

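/*
 * Look up the OFDM/MCS DE for the current channel; an "extra" group
 * resolves to the mean of its two neighbouring groups. The trim lookup
 * in _tssi_get_ofdm_trim_de() below follows the same scheme.
 */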
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u32 gidx, gidx_1st, gidx_2nd;
	s8 de_1st = 0;
	s8 de_2nd = 0;
	s8 val;

	gidx = _tssi_get_ofdm_group(rtwdev, ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
		    path, gidx);

	if (IS_TSSI_EXTRA_GROUP(gidx)) {
		gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
		gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
		val = (de_1st + de_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
			    path, val, de_1st, de_2nd);
	} else {
		val = tssi_info->tssi_mcs[path][gidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
	}

	return val;
}

static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u32 tgidx, tgidx_1st, tgidx_2nd;
	s8 tde_1st = 0;
	s8 tde_2nd = 0;
	s8 val;

	tgidx = _tssi_get_trim_group(rtwdev, ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
		    path, tgidx);

	if (IS_TSSI_EXTRA_GROUP(tgidx)) {
		tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
		tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
		val = (tde_1st + tde_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
			    path, val, tde_1st, tde_2nd);
	} else {
		val = tssi_info->tssi_trim[path][tgidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
			    path, val);
	}

	return val;
}

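/*
 * Write the efuse-calibrated DE values, adjusted by the trim offset, into
 * the CCK and MCS TSSI DE registers of both paths; __DE_MASK selects bits
 * [21:12] of each register.
 */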
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy)
{
#define __DE_MASK 0x003ff000
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858};
	static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860};
	static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838};
	static const u32 r_mcs_40m[RF_PATH_NUM_8852A] = {0x5840, 0x7840};
	static const u32 r_mcs_80m[RF_PATH_NUM_8852A] = {0x5848, 0x7848};
	static const u32 r_mcs_80m_80m[RF_PATH_NUM_8852A] = {0x5850, 0x7850};
	static const u32 r_mcs_5m[RF_PATH_NUM_8852A] = {0x5828, 0x7828};
	static const u32 r_mcs_10m[RF_PATH_NUM_8852A] = {0x5830, 0x7830};
	u8 ch = chan->channel;
	u8 i, gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = 0; i < RF_PATH_NUM_8852A; i++) {
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, r_cck_long[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_cck_short[i], __DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    r_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, r_cck_long[i],
						  __DE_MASK));

		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, r_mcs_20m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_40m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_80m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_80m_80m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_5m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_10m[i], __DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    r_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, r_mcs_20m[i],
						  __DE_MASK));
	}
#undef __DE_MASK
}

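/*
 * TSSI thermal tracking: the gain offset is 1.5 * (current - base) thermal
 * steps with 3 fractional bits. The integer part, clamped to [-16, 15],
 * goes to the DPD offset register; the fractional part indexes
 * tx_gain_scale_table.
 */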
static void _tssi_track(struct rtw89_dev *rtwdev)
{
	static const u32 tx_gain_scale_table[] = {
		0x400, 0x40e, 0x41d, 0x427, 0x43c, 0x44c, 0x45c, 0x46c,
		0x400, 0x39d, 0x3ab, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f1
	};
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 path;
	u8 cur_ther;
	s32 delta_ther = 0, gain_offset_int, gain_offset_float;
	s8 gain_offset;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] %s:\n",
		    __func__);

	if (!rtwdev->is_tssi_mode[RF_PATH_A])
		return;
	if (!rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	for (path = RF_PATH_A; path < RF_PATH_NUM_8852A; path++) {
		if (!tssi_info->tssi_tracking_check[path]) {
			rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] return!!!\n");
			continue;
		}

		cur_ther = (u8)rtw89_phy_read32_mask(rtwdev,
						     R_TSSI_THER + (path << 13),
						     B_TSSI_THER);

		if (cur_ther == 0 || tssi_info->base_thermal[path] == 0)
			continue;

		delta_ther = cur_ther - tssi_info->base_thermal[path];

		gain_offset = (s8)delta_ther * 15 / 10;

		tssi_info->extra_ofst[path] = gain_offset;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRK] base_thermal=%d gain_offset=0x%x path=%d\n",
			    tssi_info->base_thermal[path], gain_offset, path);

		gain_offset_int = gain_offset >> 3;
		gain_offset_float = gain_offset & 7;

		if (gain_offset_int > 15)
			gain_offset_int = 15;
		else if (gain_offset_int < -16)
			gain_offset_int = -16;

		rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN + (path << 13),
				       B_DPD_OFT_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13),
				       B_TXGAIN_SCALE_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_ADDR + (path << 13),
				       B_DPD_OFT_ADDR, gain_offset_int);

		rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13),
				       B_TXGAIN_SCALE_OFT,
				       tx_gain_scale_table[gain_offset_float]);
	}
}

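/*
 * Keep TSSI tracking enabled only for high-power 2 GHz operation (limit
 * above 1800, presumably 0.01-dBm units, i.e. 18 dBm); otherwise disable
 * tracking and clear the extra offsets.
 */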
static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel, ch_tmp;
	u8 bw = chan->band_width;
	u8 band = chan->band_type;
	u8 subband = chan->subband_type;
	s8 power;
	s32 xdbm;

	if (bw == RTW89_CHANNEL_WIDTH_40)
		ch_tmp = ch - 2;
	else if (bw == RTW89_CHANNEL_WIDTH_80)
		ch_tmp = ch - 6;
	else
		ch_tmp = ch;

	power = rtw89_phy_read_txpwr_limit(rtwdev, band, bw, RTW89_1TX,
					   RTW89_RS_MCS, RTW89_NONBF, ch_tmp);

	xdbm = power * 100 / 4;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d xdbm=%d\n",
		    __func__, phy, xdbm);

	if (xdbm > 1800 && subband == RTW89_CH_2G) {
		tssi_info->tssi_tracking_check[RF_PATH_A] = true;
		tssi_info->tssi_tracking_check[RF_PATH_B] = true;
	} else {
		rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_tracking_defs_tbl);
		tssi_info->extra_ofst[RF_PATH_A] = 0;
		tssi_info->extra_ofst[RF_PATH_B] = 0;
		tssi_info->tssi_tracking_check[RF_PATH_A] = false;
		tssi_info->tssi_tracking_check[RF_PATH_B] = false;
	}
}

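/* Transmit a PMAC packet burst at a fixed power for TSSI measurement. */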
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			u8 path, s16 pwr_dbm, u8 enable)
{
	rtw8852a_bb_set_plcp_tx(rtwdev);
	rtw8852a_bb_cfg_tx_path(rtwdev, path);
	rtw8852a_bb_set_power(rtwdev, pwr_dbm, phy);
	rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy);
}

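/*
 * Send a short PMAC TX burst at (roughly) the regulatory 20 MHz OFDM limit
 * and latch the resulting default TX AGC offsets for both paths, retrying
 * up to six times until a non-zero value is read. Scheduled TX is paused
 * and BTC is notified (reusing the DPK RFK type) around the burst.
 */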
static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	const struct rtw89_chip_info *mac_reg = rtwdev->chip;
	u8 ch = chan->channel, ch_tmp;
	u8 bw = chan->band_width;
	u8 band = chan->band_type;
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0);
	s8 power;
	s16 xdbm;
	u32 i, tx_counter = 0;

	if (bw == RTW89_CHANNEL_WIDTH_40)
		ch_tmp = ch - 2;
	else if (bw == RTW89_CHANNEL_WIDTH_80)
		ch_tmp = ch - 6;
	else
		ch_tmp = ch;

	power = rtw89_phy_read_txpwr_limit(rtwdev, band, RTW89_CHANNEL_WIDTH_20,
					   RTW89_1TX, RTW89_RS_OFDM,
					   RTW89_NONBF, ch_tmp);

	xdbm = (power * 100) >> mac_reg->txpwr_factor_mac;

	if (xdbm > 1800)
		xdbm = 68;
	else
		xdbm = power * 2;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] %s: phy=%d org_power=%d xdbm=%d\n",
		    __func__, phy, power, xdbm);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
	tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);

	_tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true);
	mdelay(15);
	_tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false);

	tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD) -
		     tx_counter;

	if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0xc000 &&
	    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0x0) {
		for (i = 0; i < 6; i++) {
			tssi_info->default_txagc_offset[RF_PATH_A] =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
						      MASKBYTE3);

			if (tssi_info->default_txagc_offset[RF_PATH_A] != 0x0)
				break;
		}
	}

	if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0xc000 &&
	    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0x0) {
		for (i = 0; i < 6; i++) {
			tssi_info->default_txagc_offset[RF_PATH_B] =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
						      MASKBYTE3);

			if (tssi_info->default_txagc_offset[RF_PATH_B] != 0x0)
				break;
		}
	}

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] %s: tx counter=%d\n",
		    __func__, tx_counter);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] Backup R_TXAGC_BB=0x%x R_TXAGC_BB_S1=0x%x\n",
		    tssi_info->default_txagc_offset[RF_PATH_A],
		    tssi_info->default_txagc_offset[RF_PATH_B]);

	rtw8852a_bb_tx_mode_switch(rtwdev, phy, 0);

	rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}

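/*
 * Exported calibration entry points. Most wrap an internal routine with
 * BTC RFK notifications and, where TX must be quiesced, a scheduled-TX
 * pause/resume plus a wait for RX mode.
 */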
void rtw8852a_rck(struct rtw89_dev *rtwdev)
{
	u8 path;

	for (path = 0; path < 2; path++)
		_rck(rtwdev, path);
}

void rtw8852a_dack(struct rtw89_dev *rtwdev)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
	_dac_cal(rtwdev, false);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}

void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_iqk_init(rtwdev);
	if (rtwdev->dbcc_en)
		_iqk_dbcc(rtwdev, phy_idx);
	else
		_iqk(rtwdev, phy_idx, false);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}

void rtw8852a_iqk_track(struct rtw89_dev *rtwdev)
{
	_iqk_track(rtwdev);
}

void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		     bool is_afe)
{
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_rx_dck(rtwdev, phy_idx, is_afe);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}

void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	rtwdev->dpk.is_dpk_enable = true;
	rtwdev->dpk.is_dpk_reload_en = false;
	_dpk(rtwdev, phy_idx, false);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}

void rtw8852a_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}

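/*
 * Full TSSI bring-up: disable TSSI, run the complete per-path init
 * sequence (RF settings, BB TX power control, DCK, thermal table, DAC
 * gain, slope calibration, PAK), then re-enable TSSI and apply the efuse
 * DE values, high-power tracking policy and the pre-TX AGC capture.
 */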
void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
		    __func__, phy);

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
		_tssi_rf_setting(rtwdev, phy, i);
		_tssi_set_sys(rtwdev, phy);
		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
		_tssi_set_dck(rtwdev, phy, i);
		_tssi_set_tmeter_tbl(rtwdev, phy, i);
		_tssi_set_dac_gain_tbl(rtwdev, phy, i);
		_tssi_slope_cal_org(rtwdev, phy, i);
		_tssi_set_rf_gap_tbl(rtwdev, phy, i);
		_tssi_set_slope(rtwdev, phy, i);
		_tssi_pak(rtwdev, phy, i);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy);
	_tssi_high_power(rtwdev, phy);
	_tssi_pre_tx(rtwdev, phy);
}

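/*
 * Lightweight TSSI re-init for a channel switch during scan: only the
 * channel-dependent steps (thermal table, PAK, efuse DE) are redone.
 */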
void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
		    __func__, phy);

	if (!rtwdev->is_tssi_mode[RF_PATH_A])
		return;
	if (!rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
		_tssi_rf_setting(rtwdev, phy, i);
		_tssi_set_sys(rtwdev, phy);
		_tssi_set_tmeter_tbl(rtwdev, phy, i);
		_tssi_pak(rtwdev, phy, i);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy);
}

void rtw8852a_tssi_track(struct rtw89_dev *rtwdev)
{
	_tssi_track(rtwdev);
}

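/*
 * The two helpers below zero the TSSI averaging windows for scanning and
 * restore them (0x4/0x2) for normal operation, briefly toggling TSSI off
 * and on around the change via the defs tables.
 */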
static
void _rtw8852a_tssi_avg_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	/* disable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x0);

	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x0);

	/* enable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl);
}

static
void _rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	/* disable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x4);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);

	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x4);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);

	/* enable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl);
}

static void rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy, bool enable)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	if (enable) {
		/* SCAN_START */
		_rtw8852a_tssi_avg_scan(rtwdev, phy);
	} else {
		/* SCAN_END */
		_rtw8852a_tssi_set_avg(rtwdev, phy);
	}
}

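/*
 * On scan start, snapshot the default TX AGC offsets while skipping the
 * transient 0xc000/0x0 readings; on scan end, write the snapshots back
 * and pulse the TSSI offset-enable bits so the hardware reloads them.
 */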
static void rtw8852a_tssi_default_txagc(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy, bool enable)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 i;

	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	if (enable) {
		if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 &&
		    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) {
			for (i = 0; i < 6; i++) {
				tssi_info->default_txagc_offset[RF_PATH_A] =
					rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
							      B_TXAGC_BB);
				if (tssi_info->default_txagc_offset[RF_PATH_A])
					break;
			}
		}

		if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 &&
		    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) {
			for (i = 0; i < 6; i++) {
				tssi_info->default_txagc_offset[RF_PATH_B] =
					rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
							      B_TXAGC_BB_S1);
				if (tssi_info->default_txagc_offset[RF_PATH_B])
					break;
			}
		}
	} else {
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT,
				       tssi_info->default_txagc_offset[RF_PATH_A]);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,
				       tssi_info->default_txagc_offset[RF_PATH_B]);

		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
	}
}

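/* Scan hook: freeze TX AGC and TSSI averaging at scan start, restore at end. */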
void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev,
			       bool scan_start, enum rtw89_phy_idx phy_idx)
{
	if (scan_start) {
		rtw8852a_tssi_default_txagc(rtwdev, phy_idx, true);
		rtw8852a_tssi_set_avg(rtwdev, phy_idx, true);
	} else {
		rtw8852a_tssi_default_txagc(rtwdev, phy_idx, false);
		rtw8852a_tssi_set_avg(rtwdev, phy_idx, false);
	}
}
