1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2022 Realtek Corporation
3 */
4
5 #include "coex.h"
6 #include "debug.h"
7 #include "phy.h"
8 #include "reg.h"
9 #include "rtw8852c.h"
10 #include "rtw8852c_rfk.h"
11 #include "rtw8852c_rfk_table.h"
12 #include "rtw8852c_table.h"
13
/* TSSI DE (compensation) register addresses, one entry per RF path
 * ({path A, path B}); one table per rate section / bandwidth.
 */
#define _TSSI_DE_MASK GENMASK(21, 12)
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852C] = {0x5858, 0x7858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852C] = {0x5860, 0x7860};
static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852C] = {0x5838, 0x7838};
static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852C] = {0x5840, 0x7840};
static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852C] = {0x5848, 0x7848};
static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852C] = {0x5850, 0x7850};
static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852C] = {0x5828, 0x7828};
static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852C] = {0x5830, 0x7830};

/* BB registers saved/restored around RFK (see _rfk_backup_bb_reg()) */
static const u32 rtw8852c_backup_bb_regs[] = {
	0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0e8, 0x823c, 0x8224, 0x8220,
	0xc1d4, 0xc1d8, 0xc1e8
};

/* RF registers saved/restored per path around RFK (see _rfk_backup_rf_reg()) */
static const u32 rtw8852c_backup_rf_regs[] = {
	0xdf, 0x8f, 0x97, 0xa3, 0x5, 0x10005
};

#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852c_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852c_backup_rf_regs)

/* RX IQK gain/attenuation settings per calibration group;
 * _g = 2 GHz, _a = 5 GHz, _a6 = 6 GHz (see _rxk_group_sel()).
 */
#define RXK_GROUP_NR 4
static const u32 _rxk_a6_idxrxgain[RXK_GROUP_NR] = {0x190, 0x196, 0x290, 0x316};
static const u32 _rxk_a6_idxattc2[RXK_GROUP_NR] = {0x00, 0x0, 0x00, 0x00};
static const u32 _rxk_a_idxrxgain[RXK_GROUP_NR] = {0x190, 0x198, 0x310, 0x318};
static const u32 _rxk_a_idxattc2[RXK_GROUP_NR] = {0x00, 0x00, 0x00, 0x00};
static const u32 _rxk_g_idxrxgain[RXK_GROUP_NR] = {0x252, 0x26c, 0x350, 0x360};
static const u32 _rxk_g_idxattc2[RXK_GROUP_NR] = {0x00, 0x07, 0x00, 0x3};

/* TX IQK gain settings per calibration group, same band suffix convention */
#define TXK_GROUP_NR 3
static const u32 _txk_a6_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
static const u32 _txk_a6_track_range[TXK_GROUP_NR] = {0x6, 0x7, 0x7};
static const u32 _txk_a6_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e};
static const u32 _txk_a6_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
static const u32 _txk_a_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
static const u32 _txk_a_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x7};
static const u32 _txk_a_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e};
static const u32 _txk_a_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
static const u32 _txk_g_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
static const u32 _txk_g_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x6};
static const u32 _txk_g_gain_bb[TXK_GROUP_NR] = {0x0e, 0x0a, 0x0e};
static const u32 _txk_g_itqt[TXK_GROUP_NR] = { 0x12, 0x12, 0x12};

/* DPK parameter registers, indexed by [rf_path][group] */
static const u32 dpk_par_regs[RTW89_DPK_RF_PATH][4] = {
	{0x8190, 0x8194, 0x8198, 0x81a4},
	{0x81a8, 0x81c4, 0x81c8, 0x81e8},
};
62
_kpath(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx)63 static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
64 {
65 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n",
66 rtwdev->dbcc_en, phy_idx);
67
68 if (!rtwdev->dbcc_en)
69 return RF_AB;
70
71 if (phy_idx == RTW89_PHY_0)
72 return RF_A;
73 else
74 return RF_B;
75 }
76
/* Save the BB registers listed in rtw8852c_backup_bb_regs[] into
 * @backup_bb_reg_val (BACKUP_BB_REGS_NR entries) before calibration.
 */
static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
	u32 idx;

	for (idx = 0; idx < BACKUP_BB_REGS_NR; idx++) {
		u32 reg = rtw8852c_backup_bb_regs[idx];
		u32 val = rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD);

		backup_bb_reg_val[idx] = val;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]backup bb reg : %x, value =%x\n", reg, val);
	}
}
90
/* Save the RF registers listed in rtw8852c_backup_rf_regs[] for @rf_path
 * into @backup_rf_reg_val (BACKUP_RF_REGS_NR entries) before calibration.
 */
static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
			       u8 rf_path)
{
	u32 idx;

	for (idx = 0; idx < BACKUP_RF_REGS_NR; idx++) {
		u32 reg = rtw8852c_backup_rf_regs[idx];
		u32 val = rtw89_read_rf(rtwdev, rf_path, reg, RFREG_MASK);

		backup_rf_reg_val[idx] = val;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path,
			    reg, val);
	}
}
105
/* Write the values saved by _rfk_backup_bb_reg() back to the BB registers. */
static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
	u32 idx;

	for (idx = 0; idx < BACKUP_BB_REGS_NR; idx++) {
		u32 reg = rtw8852c_backup_bb_regs[idx];
		u32 val = backup_bb_reg_val[idx];

		rtw89_phy_write32_mask(rtwdev, reg, MASKDWORD, val);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]restore bb reg : %x, value =%x\n", reg, val);
	}
}
118
/* Write the values saved by _rfk_backup_rf_reg() back to @rf_path's
 * RF registers.
 */
static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
				u8 rf_path)
{
	u32 idx;

	for (idx = 0; idx < BACKUP_RF_REGS_NR; idx++) {
		u32 reg = rtw8852c_backup_rf_regs[idx];
		u32 val = backup_rf_reg_val[idx];

		rtw89_write_rf(rtwdev, rf_path, reg, RFREG_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path,
			    reg, val);
	}
}
133
/* Wait for every RF path selected in @kpath to leave TX mode.
 *
 * Polls the mode field of RF register 0x00 every 2 us (up to 5 ms per
 * path) until it is no longer 2 (RF_TX, see enum rf_mode below).  A
 * timeout is only logged; calibration proceeds regardless.
 */
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
	u8 path;
	u32 rf_mode;
	int ret;

	for (path = 0; path < RF_PATH_MAX; path++) {
		if (!(kpath & BIT(path)))
			continue;

		ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
					       2, 5000, false, rtwdev, path, 0x00,
					       RR_MOD_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
			    path, ret);
	}
}
152
_dack_dump(struct rtw89_dev * rtwdev)153 static void _dack_dump(struct rtw89_dev *rtwdev)
154 {
155 struct rtw89_dack_info *dack = &rtwdev->dack;
156 u8 i;
157 u8 t;
158
159 rtw89_debug(rtwdev, RTW89_DBG_RFK,
160 "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
161 dack->addck_d[0][0], dack->addck_d[0][1]);
162 rtw89_debug(rtwdev, RTW89_DBG_RFK,
163 "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
164 dack->addck_d[1][0], dack->addck_d[1][1]);
165 rtw89_debug(rtwdev, RTW89_DBG_RFK,
166 "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
167 dack->dadck_d[0][0], dack->dadck_d[0][1]);
168 rtw89_debug(rtwdev, RTW89_DBG_RFK,
169 "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
170 dack->dadck_d[1][0], dack->dadck_d[1][1]);
171
172 rtw89_debug(rtwdev, RTW89_DBG_RFK,
173 "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
174 dack->biask_d[0][0], dack->biask_d[0][1]);
175 rtw89_debug(rtwdev, RTW89_DBG_RFK,
176 "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
177 dack->biask_d[1][0], dack->biask_d[1][1]);
178
179 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
180 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
181 t = dack->msbk_d[0][0][i];
182 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
183 }
184 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
185 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
186 t = dack->msbk_d[0][1][i];
187 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
188 }
189 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
190 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
191 t = dack->msbk_d[1][0][i];
192 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
193 }
194 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
195 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
196 t = dack->msbk_d[1][1][i];
197 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
198 }
199 }
200
/* Read back and store the ADDCK (ADC DC offset calibration) results for
 * both paths.  Results land in dack->addck_d[path][ch], where ch 0/1
 * are logged as "ic"/"qc" by _dack_dump().  The B_ADDCK* field is
 * cleared first on each path before reading the result registers.
 */
static void _addck_backup(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
	dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0,
						    B_ADDCKR0_A0);
	dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0,
						    B_ADDCKR0_A1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
	dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1,
						    B_ADDCKR1_A0);
	dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1,
						    B_ADDCKR1_A1);
}
217
/* Write the ADDCK results saved by _addck_backup() back into the
 * per-path reload registers and set the reload-select field (RLS = 0x3)
 * so the hardware uses the manually loaded values.
 */
static void _addck_reload(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1,
			       dack->addck_d[0][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0,
			       dack->addck_d[0][1]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1,
			       dack->addck_d[1][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0,
			       dack->addck_d[1][1]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3);
}
233
/* Read back and store the path-A (S0) DACK results: per-index MSBK
 * values for both channels, then the bias and DADCK results.
 *
 * Debug mode (B_P0_NRBW_DBG) is enabled here so the result registers
 * can be read; the caller (_dack_s0()) clears it again afterwards.
 */
static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		/* select entry i, then read the corresponding result */
		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
		dack->msbk_d[0][0][i] = rtw89_phy_read32_mask(rtwdev,
							      R_DACK_S0P2,
							      B_DACK_S0M0);
		rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
		dack->msbk_d[0][1][i] = rtw89_phy_read32_mask(rtwdev,
							      R_DACK_S0P3,
							      B_DACK_S0M1);
	}
	dack->biask_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00,
						    B_DACK_BIAS00);
	dack->biask_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01,
						    B_DACK_BIAS01);
	dack->dadck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00,
						    B_DACK_DADCK00);
	dack->dadck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01,
						    B_DACK_DADCK01);
}
259
/* Read back and store the path-B (S1) DACK results; mirror of
 * _dack_backup_s0() using the S1 register set.
 *
 * Debug mode (B_P1_DBGMOD_ON) is enabled here; the caller (_dack_s1())
 * clears it again afterwards.
 */
static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		/* select entry i, then read the corresponding result */
		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
		dack->msbk_d[1][0][i] = rtw89_phy_read32_mask(rtwdev,
							      R_DACK10S,
							      B_DACK10S);
		rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
		dack->msbk_d[1][1][i] = rtw89_phy_read32_mask(rtwdev,
							      R_DACK11S,
							      B_DACK11S);
	}
	dack->biask_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10,
						    B_DACK_BIAS10);
	dack->biask_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11,
						    B_DACK_BIAS11);
	dack->dadck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10,
						    B_DACK_DADCK10);
	dack->dadck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11,
						    B_DACK_DADCK11);
}
285
/* Write the DACK results stored for (@path, @index) back to hardware.
 *
 * The target register block starts at 0xc200, offset by 0x14 for
 * index 1 and by 0x28 for path B.  MSBK entries are packed four bytes
 * per 32-bit register, most-significant group first:
 *   0xc200: entries 15..12,  0xc204: 11..8,  0xc208: 7..4,  0xc20c: 3..0
 * followed by biask/dadck at 0xc210.  The four formerly copy-pasted
 * pack-and-write sections are folded into one loop; the register write
 * sequence is unchanged.
 */
static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path path, u8 index)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 idx_offset, path_offset;
	u32 val32, offset, addr;
	u8 i, grp;

	idx_offset = (index == 0 ? 0 : 0x14);
	path_offset = (path == RF_PATH_A ? 0 : 0x28);
	offset = idx_offset + path_offset;

	rtw89_rfk_parser(rtwdev, &rtw8852c_dack_reload_defs_tbl);

	/* msbk_d: 15/14/13/12, 11/10/9/8, 7/6/5/4, 3/2/1/0 */
	for (grp = 0; grp < 4; grp++) {
		val32 = 0x0;
		for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
			val32 |= dack->msbk_d[path][index][i + 12 - grp * 4] <<
				 (i * 8);
		addr = 0xc200 + grp * 4 + offset;
		rtw89_phy_write32(rtwdev, addr, val32);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
			    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
	}

	/* dadak_d/biask_d */
	val32 = (dack->biask_d[path][index] << 22) |
		(dack->dadck_d[path][index] << 14);
	addr = 0xc210 + offset;
	rtw89_phy_write32(rtwdev, addr, val32);
	rtw89_phy_write32_set(rtwdev, addr, BIT(1));
}
343
_dack_reload(struct rtw89_dev * rtwdev,enum rtw89_rf_path path)344 static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
345 {
346 u8 i;
347
348 for (i = 0; i < 2; i++)
349 _dack_reload_by_path(rtwdev, path, i);
350 }
351
_addck(struct rtw89_dev * rtwdev)352 static void _addck(struct rtw89_dev *rtwdev)
353 {
354 struct rtw89_dack_info *dack = &rtwdev->dack;
355 u32 val;
356 int ret;
357
358 /* S0 */
359 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1);
360 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1);
361 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0);
362 fsleep(1);
363 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);
364
365 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
366 1, 10000, false, rtwdev, 0xc0fc, BIT(0));
367 if (ret) {
368 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
369 dack->addck_timeout[0] = true;
370 }
371
372 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0);
373
374 /* S1 */
375 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x1);
376 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x1);
377 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x0);
378 udelay(1);
379 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);
380
381 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
382 1, 10000, false, rtwdev, 0xc1fc, BIT(0));
383 if (ret) {
384 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
385 dack->addck_timeout[0] = true;
386 }
387 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x0);
388 }
389
/* Issue the DACK reset sequence for @path via the per-path RFK table. */
static void _dack_reset(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852c_dack_reset_defs_a_tbl,
				 &rtw8852c_dack_reset_defs_b_tbl);
}
396
/* ADC sampling clock selections (value written via B_P0_RXCK_VAL) */
enum adc_ck {
	ADC_NA = 0,
	ADC_480M = 1,
	ADC_960M = 2,
	ADC_1920M = 3,
};

/* DAC clock selections (value written via B_P0_TXCK_VAL) */
enum dac_ck {
	DAC_40M = 0,
	DAC_80M = 1,
	DAC_120M = 2,
	DAC_160M = 3,
	DAC_240M = 4,
	DAC_320M = 5,
	DAC_480M = 6,
	DAC_960M = 7,
};

/* RF front-end operating modes as read from the RF mode field
 * (RR_MOD); e.g. _wait_rx_mode() polls for mode != RF_TX.
 */
enum rf_mode {
	RF_SHUT_DOWN = 0x0,
	RF_STANDBY = 0x1,
	RF_TX = 0x2,
	RF_RX = 0x3,
	RF_TXIQK = 0x4,
	RF_DPK = 0x5,
	RF_RXK1 = 0x6,
	RF_RXK2 = 0x7,
};
425
/* Force the TX DAC clock of @path to @ck, or release the force.
 *
 * The per-path register block is addressed as R_P0_RXCK | (path << 13).
 * The force-enable bit is always cleared first; when @force is true the
 * clock value is programmed and the force re-enabled, otherwise @ck is
 * ignored and the clock is left un-forced.
 */
static void rtw8852c_txck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
				enum dac_ck ck)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0);

	if (!force)
		return;

	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1);
}
437
/* Force the RX ADC clock of @path to @ck, or release the force; same
 * disable-program-enable sequence as rtw8852c_txck_force().
 */
static void rtw8852c_rxck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
				enum adc_ck ck)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);

	if (!force)
		return;

	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);
}
449
/* Return true when all four per-stage DACK done flags of the selected
 * path (@s0: path A, otherwise path B) read non-zero.  Registers are
 * read in the same order, and reading stops at the first clear flag,
 * exactly as the original if-chain did.
 */
static bool _check_dack_done(struct rtw89_dev *rtwdev, bool s0)
{
	static const u32 s0_addr[4] = {R_DACK_S0P0, R_DACK_S0P1,
				       R_DACK_S0P2, R_DACK_S0P3};
	static const u32 s0_mask[4] = {B_DACK_S0P0_OK, B_DACK_S0P1_OK,
				       B_DACK_S0P2_OK, B_DACK_S0P3_OK};
	static const u32 s1_addr[4] = {R_DACK_S1P0, R_DACK_S1P1,
				       R_DACK_S1P2, R_DACK_S1P3};
	static const u32 s1_mask[4] = {B_DACK_S1P0_OK, B_DACK_S1P1_OK,
				       B_DACK_S1P2_OK, B_DACK_S1P3_OK};
	const u32 *addr = s0 ? s0_addr : s1_addr;
	const u32 *mask = s0 ? s0_mask : s1_mask;
	u8 i;

	for (i = 0; i < 4; i++) {
		if (rtw89_phy_read32_mask(rtwdev, addr[i], mask[i]) == 0)
			return false;
	}

	return true;
}
468
/* Run DACK on path A (S0): force the DAC clock to 160 MHz, apply the
 * S0 setup table, reset, then trigger calibration via B_DCOF1_S and
 * poll _check_dack_done() for up to 10 ms.  On timeout the msbk flag
 * is set but the flow continues.  Results are backed up and reloaded,
 * and the debug mode enabled by _dack_backup_s0() is cleared.
 */
static void _dack_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	rtw8852c_txck_force(rtwdev, RF_PATH_A, true, DAC_160M);
	rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s0_tbl);

	_dack_reset(rtwdev, RF_PATH_A);

	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);
	ret = read_poll_timeout_atomic(_check_dack_done, done, done,
				       1, 10000, false, rtwdev, true);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
		dack->msbk_timeout[0] = true;
	}
	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0);
	rtw8852c_txck_force(rtwdev, RF_PATH_A, false, DAC_960M);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");

	_dack_backup_s0(rtwdev);
	_dack_reload(rtwdev, RF_PATH_A);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
}
495
_dack_s1(struct rtw89_dev * rtwdev)496 static void _dack_s1(struct rtw89_dev *rtwdev)
497 {
498 struct rtw89_dack_info *dack = &rtwdev->dack;
499 bool done;
500 int ret;
501
502 rtw8852c_txck_force(rtwdev, RF_PATH_B, true, DAC_160M);
503 rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s1_tbl);
504
505 _dack_reset(rtwdev, RF_PATH_B);
506
507 rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1);
508 ret = read_poll_timeout_atomic(_check_dack_done, done, done,
509 1, 10000, false, rtwdev, false);
510 if (ret) {
511 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n");
512 dack->msbk_timeout[0] = true;
513 }
514 rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0);
515 rtw8852c_txck_force(rtwdev, RF_PATH_B, false, DAC_960M);
516 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");
517
518 _dack_backup_s1(rtwdev);
519 _dack_reload(rtwdev, RF_PATH_B);
520 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
521 }
522
/* Run DAC calibration on both paths, S0 (path A) then S1 (path B). */
static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
	_dack_s1(rtwdev);
}
528
/* Run DRCK: trigger the calibration via B_DRCK_EN, poll the done bit
 * (0xc0c8 BIT(3)) for up to 10 ms, then apply the follow-up table and
 * copy the hardware result (R_DRCK_RES) into the manual value field
 * (B_DRCK_VAL) with the idle bit cleared.  Timeout is log-only.
 */
static void _drck(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false, rtwdev, 0xc0c8, BIT(3));
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");

	rtw89_rfk_parser(rtwdev, &rtw8852c_drck_defs_tbl);

	val = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, B_DRCK_RES);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, val);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
		    rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
}
548
/* Top-level DAC calibration flow: DRCK, then ADDCK, then DACK on both
 * paths, with BT-coex notifications bracketing each hardware stage.
 * The RF mode (RR_MOD) and reset (RR_RSV1) registers of both paths are
 * saved/overridden for the duration and restored at the end.
 *
 * NOTE(review): the @force parameter is not used in this body —
 * presumably reserved for a skip-if-already-done policy; confirm
 * against callers.
 */
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 rf0_0, rf1_0;
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);

	dack->dack_done = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
	rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
	rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
	_drck(rtwdev);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
	_addck(rtwdev);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);

	_addck_backup(rtwdev);
	_addck_reload(rtwdev);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
	_dack(rtwdev);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);

	_dack_dump(rtwdev);
	dack->dack_done = true;
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
	dack->dack_cnt++;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}
587
/* IQK/NCTL firmware and flow version numbers and sizing */
#define RTW8852C_NCTL_VER 0xd
#define RTW8852C_IQK_VER 0x2a
#define RTW8852C_IQK_SS 2
#define RTW8852C_IQK_THR_REK 8
#define RTW8852C_IQK_CFIR_GROUP_NR 4

/* One-shot IQK sub-command kinds dispatched by _iqk_one_shot();
 * _G/_A variants select 2 GHz vs 5/6 GHz LO-leakage (FLOK) steps,
 * NB* are the narrow-band TX/RX calibration variants.
 */
enum rtw8852c_iqk_type {
	ID_TXAGC,
	ID_G_FLOK_COARSE,
	ID_A_FLOK_COARSE,
	ID_G_FLOK_FINE,
	ID_A_FLOK_FINE,
	ID_FLOK_VBUFFER,
	ID_TXK,
	ID_RXAGC,
	ID_RXK,
	ID_NBTXK,
	ID_NBRXK,
};
607
/* Write @en_rxgac to the RX AGC enable field of @path (despite the
 * name, this sets the enable field to whatever value is passed in).
 */
static void rtw8852c_disable_rxagc(struct rtw89_dev *rtwdev, u8 path, u8 en_rxgac)
{
	bool is_a = path == RF_PATH_A;
	u32 addr = is_a ? R_P0_AGC_CTL : R_P1_AGC_CTL;
	u32 mask = is_a ? B_P0_AGC_EN : B_P1_AGC_EN;

	rtw89_phy_write32_mask(rtwdev, addr, mask, en_rxgac);
}
615
/* Prepare @path for RX IQK: park the ADC FIFO, select the ADC clock
 * and CFCH filter bandwidth matching the calibration bandwidth
 * (20/40 MHz -> 480M, 80 MHz -> 960M, 160 MHz -> 1920M), apply the
 * common RXK table, then re-arm the ADC FIFO for this path.
 * Per-path register blocks are addressed via (path << 13) / (path << 8)
 * offsets.
 */
static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	if (path == RF_PATH_A)
		rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101);
	else
		rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0202);

	switch (iqk_info->iqk_bw[path]) {
	case RTW89_CHANNEL_WIDTH_20:
	case RTW89_CHANNEL_WIDTH_40:
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
		rtw8852c_rxck_force(rtwdev, path, true, ADC_480M);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xf);
		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
		rtw8852c_rxck_force(rtwdev, path, true, ADC_960M);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xd);
		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
		break;
	case RTW89_CHANNEL_WIDTH_160:
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
		rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb);
		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
		break;
	default:
		break;
	}

	rtw89_rfk_parser(rtwdev, &rtw8852c_iqk_rxk_cfg_defs_tbl);

	if (path == RF_PATH_A)
		rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1101);
	else
		rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x2202);
}
665
/* Wait for a one-shot IQK command to complete: poll the NCTL ready
 * byte (0xbff8) for 0x55 (up to ~8.2 ms), clear the handshake byte and
 * log the NCTL report register.
 *
 * NOTE(review): always returns false ("not failed") even on poll
 * timeout — the timeout is log-only.  Possibly intentional (callers
 * fall back to default CFIR values elsewhere); confirm before relying
 * on the return value.
 */
static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
{
	u32 tmp;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       1, 8200, false, rtwdev, 0xbff8, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n");

	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
	tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp);

	return false;
}
685
/* Issue a single IQK sub-command of @ktype for @path.
 *
 * Builds the NCTL command word (bit 4+path selects the path; bits 8+
 * encode the sub-command and, for TXK/RXK, the bandwidth), optionally
 * asserts the per-path RFC control bit while the command runs, kicks
 * the command off via R_NCTL_CFG (cmd + 1), then waits for completion
 * through _iqk_check_cal().  Returns the (currently always-false, see
 * _iqk_check_cal()) fail indication.
 */
static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
			  enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 addr_rfc_ctl = R_UPD_CLK + (path << 13);
	u32 iqk_cmd;
	bool fail;

	switch (ktype) {
	case ID_TXAGC:
		iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_A_FLOK_COARSE:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
		iqk_cmd = 0x008 | (1 << (4 + path));
		break;
	case ID_G_FLOK_COARSE:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
		iqk_cmd = 0x108 | (1 << (4 + path));
		break;
	case ID_A_FLOK_FINE:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
		iqk_cmd = 0x508 | (1 << (4 + path));
		break;
	case ID_G_FLOK_FINE:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
		iqk_cmd = 0x208 | (1 << (4 + path));
		break;
	case ID_FLOK_VBUFFER:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
		iqk_cmd = 0x308 | (1 << (4 + path));
		break;
	case ID_TXK:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
		iqk_cmd = 0x008 | (1 << (4 + path)) | ((0x8 + iqk_info->iqk_bw[path]) << 8);
		break;
	case ID_RXAGC:
		iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_RXK:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
		iqk_cmd = 0x008 | (1 << (4 + path)) | ((0xc + iqk_info->iqk_bw[path]) << 8);
		break;
	case ID_NBTXK:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
		iqk_cmd = 0x408 | (1 << (4 + path));
		break;
	case ID_NBRXK:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
		iqk_cmd = 0x608 | (1 << (4 + path));
		break;
	default:
		return false;
	}

	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
	fsleep(15);
	fail = _iqk_check_cal(rtwdev, path, ktype);
	/* always de-assert the RFC control bit when done */
	rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);

	return fail;
}
748
/* Wide-band RX IQK: run ID_RXK one-shots over all RXK_GROUP_NR gain
 * groups for @path, using the per-band gain/attenuation tables.
 *
 * Path B first switches the IQK PLL and mirrors channel/TXRX state
 * into RR_RSV4; both are undone at the end, along with the saved
 * RR_MOD_NBW value.  On success the CFIR result stays in hardware and
 * only a default NB fallback value is recorded.
 *
 * NOTE(review): `fail` holds only the last group's result when it is
 * tested after the loop — earlier group failures are not accumulated.
 * Benign today since _iqk_check_cal() always returns false, but
 * confirm if that ever changes.
 */
static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;
	u32 tmp;
	u32 bkrf0;
	u8 gp;

	bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW);
	if (path == RF_PATH_B) {
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp);
	}

	/* set RF to IQK RX mode for the calibration band */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
	default:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8);
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9);
		break;
	}

	fsleep(10);

	for (gp = 0; gp < RXK_GROUP_NR; gp++) {
		/* program this group's RX gain/attenuation for the band */
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
		default:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
				       _rxk_g_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF,
				       _rxk_g_idxattc2[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
				       _rxk_a_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT,
				       _rxk_a_idxattc2[gp]);
			break;
		case RTW89_BAND_6G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
				       _rxk_a6_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT,
				       _rxk_a6_idxattc2[gp]);
			break;
		}
		/* select the CFIR LUT slot for this group and run RXK */
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SET, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP_V1, gp);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
	}

	if (path == RF_PATH_B)
		rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0);

	if (fail) {
		iqk_info->nb_rxcfir[path] = 0x40000002;
		iqk_info->is_wb_rxiqk[path] = false;
	} else {
		iqk_info->nb_rxcfir[path] = 0x40000000;
		iqk_info->is_wb_rxiqk[path] = true;
	}

	return false;
}
833
/* Narrow-band RX IQK: same setup as _rxk_group_sel() but runs a single
 * ID_RXK one-shot on the fixed gain group 2.  On failure the current
 * hardware CFIR value (with bit 1 set) is stored as the NB result;
 * otherwise the default 0x40000002 is recorded.  Always clears the
 * wide-band-done flag for @path.
 */
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
		       enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;
	u32 tmp;
	u32 bkrf0;
	u8 gp = 0x2;

	bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW);
	if (path == RF_PATH_B) {
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp);
	}

	/* set RF to IQK RX mode for the calibration band */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
	default:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8);
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9);
		break;
	}

	fsleep(10);

	/* program gain/attenuation of the fixed group for the band */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
	default:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_g_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF, _rxk_g_idxattc2[gp]);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a_idxattc2[gp]);
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a6_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a6_idxattc2[gp]);
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp);
	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);

	if (path == RF_PATH_B)
		rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0);

	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0);

	if (fail)
		iqk_info->nb_rxcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
					      MASKDWORD) | 0x2;
	else
		iqk_info->nb_rxcfir[path] = 0x40000002;

	iqk_info->is_wb_rxiqk[path] = false;
	return fail;
}
909
/* Wide-band TX IQ calibration: sweep all TX gain groups on one path.
 *
 * For each group the per-band power/track/gain values are programmed into
 * the RF TXIG register and the matching IQP setting into the KIP, then a
 * one-shot TXK is triggered.  Note only the LAST group's result is kept in
 * @fail (matches the original flow).  Returns true on failure.
 */
static bool _txk_group_sel(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;
	u8 gp;

	for (gp = 0; gp < TXK_GROUP_NR; gp++) {
		/* Per-band TX gain setup for this calibration group. */
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
				       _txk_g_power_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
				       _txk_g_track_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
				       _txk_g_gain_bb[gp]);
			rtw89_phy_write32_mask(rtwdev,
					       R_KIP_IQP + (path << 8),
					       MASKDWORD, _txk_g_itqt[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
				       _txk_a_power_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
				       _txk_a_track_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
				       _txk_a_gain_bb[gp]);
			rtw89_phy_write32_mask(rtwdev,
					       R_KIP_IQP + (path << 8),
					       MASKDWORD, _txk_a_itqt[gp]);
			break;
		case RTW89_BAND_6G:
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
				       _txk_a6_power_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
				       _txk_a6_track_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
				       _txk_a6_gain_bb[gp]);
			rtw89_phy_write32_mask(rtwdev,
					       R_KIP_IQP + (path << 8),
					       MASKDWORD, _txk_a6_itqt[gp]);
			break;
		default:
			break;
		}
		/* Select CFIR LUT entry for this group and run one-shot TXK.
		 * Note TX groups map to GP index gp + 1 (RX uses gp directly).
		 */
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SET, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_G2, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP, gp + 1);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
	}

	if (fail) {
		iqk_info->nb_txcfir[path] = 0x40000002;
		iqk_info->is_wb_txiqk[path] = false;
	} else {
		iqk_info->nb_txcfir[path] = 0x40000000;
		iqk_info->is_wb_txiqk[path] = true;
	}

	return fail;
}
978
/* Narrow-band TX IQ calibration: single gain group (gp = 2) on one path.
 *
 * On success the measured TX CFIR coefficient is read back and stored;
 * on failure a default coefficient is used.  Returns true on failure.
 */
static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
		       enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;
	u8 gp = 0x2;

	/* Per-band TX gain setup for calibration group 2. */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_g_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_g_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_g_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _txk_g_itqt[gp]);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _txk_a_itqt[gp]);
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a6_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a6_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a6_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _txk_a6_itqt[gp]);
		break;
	default:
		break;
	}

	/* CFIR LUT selection (TX groups map to GP index gp + 1), then the
	 * one-shot narrow-band TXK trigger.
	 */
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp + 1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);

	/* Keep the measured coefficient only on success. */
	if (!fail)
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
					      MASKDWORD) | 0x2;
	else
		iqk_info->nb_txcfir[path] = 0x40000002;

	iqk_info->is_wb_txiqk[path] = false;

	return fail;
}
1031
_lok_finetune_check(struct rtw89_dev * rtwdev,u8 path)1032 static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
1033 {
1034 struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
1035 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1036 u8 idx = mcc_info->table_idx;
1037 bool is_fail1, is_fail2;
1038 u32 val;
1039 u32 core_i;
1040 u32 core_q;
1041 u32 vbuff_i;
1042 u32 vbuff_q;
1043
1044 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1045 val = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
1046 core_i = FIELD_GET(RR_TXMO_COI, val);
1047 core_q = FIELD_GET(RR_TXMO_COQ, val);
1048
1049 if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
1050 is_fail1 = true;
1051 else
1052 is_fail1 = false;
1053
1054 iqk_info->lok_idac[idx][path] = val;
1055
1056 val = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK);
1057 vbuff_i = FIELD_GET(RR_LOKVB_COI, val);
1058 vbuff_q = FIELD_GET(RR_LOKVB_COQ, val);
1059
1060 if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d)
1061 is_fail2 = true;
1062 else
1063 is_fail2 = false;
1064
1065 iqk_info->lok_vbuf[idx][path] = val;
1066
1067 return is_fail1 || is_fail2;
1068 }
1069
/* Run the four-step LO leakage calibration (LOK) on one path:
 * coarse search, vbuffer pass, fine search, second vbuffer pass,
 * then validate the result registers.  Returns true on failure.
 */
static bool _iqk_lok(struct rtw89_dev *rtwdev,
		     enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 tmp_id = 0x0;
	bool fail = false;
	bool tmp = false;

	/* Step 0: Init RF gain & tone idx= 8.25Mhz */
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, IQK_DF4_TXT_8_25MHZ);

	/* Step 1 START: _lok_coarse_fine_wi_swap */
	/* All bands use the same small TX gain (0x6) and IQ swing (0x9);
	 * only the one-shot command ID differs (2G vs. 5G/6G engine).
	 */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x9);
		tmp_id = ID_G_FLOK_COARSE;
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x9);
		tmp_id = ID_A_FLOK_COARSE;
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x9);
		tmp_id = ID_A_FLOK_COARSE;
		break;
	default:
		break;
	}
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id);
	iqk_info->lok_cor_fail[0][path] = tmp;

	/* Step 2 */
	/* Vbuffer calibration at larger gain (0x12) and swing (0x1b). */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x1b);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x1b);
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x1b);
		break;
	default:
		break;
	}
	/* Result of the vbuffer pass is intentionally not recorded. */
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);

	/* Step 3 */
	/* Fine search back at the small gain/swing. */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x9);
		tmp_id = ID_G_FLOK_FINE;
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x9);
		tmp_id = ID_A_FLOK_FINE;
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x9);
		tmp_id = ID_A_FLOK_FINE;
		break;
	default:
		break;
	}
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id);
	iqk_info->lok_fin_fail[0][path] = tmp;

	/* Step 4 large rf gain */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
	default:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x1b);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x1b);
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x1b);
		break;
	}
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);
	/* Overall pass/fail is decided by the register readback check. */
	fail = _lok_finetune_check(rtwdev, path);

	return fail;
}
1179
/* Prepare the RF TX chain of one path for TX IQK/LOK: attenuators,
 * LDO, LOK write-enable and RF mode, per band.  The write ORDER is
 * part of the hardware bring-up sequence - do not reorder.
 */
static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
	default:
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		/* RF mode word combines base 0x403e0 with the synthesizer
		 * selection decided in _iqk_get_ch_info().
		 */
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x403e0 | iqk_info->syn1to2);
		fsleep(10);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x403e0 | iqk_info->syn1to2);
		fsleep(10);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
		break;
	case RTW89_BAND_6G:
		/* 6G uses the same sequence as 5G. */
		rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x403e0 | iqk_info->syn1to2);
		fsleep(10);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
		break;
	}
}
1225
/* Publish the calibration results of one path: log every sub-result,
 * mirror the fail flags and channel info into the IQK info/result
 * registers (each path owns a 4-bit nibble) and back up the resulting
 * CFIR coefficients for later restore.
 */
static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp;
	bool flag;

	/* Record the thermal value at calibration time (used to decide
	 * later re-calibration; thermal_rek_en is reset here).
	 */
	iqk_info->thermal[path] =
		ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
	iqk_info->thermal_rek_en = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %d\n", path,
		    iqk_info->thermal[path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path,
		    iqk_info->lok_cor_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path,
		    iqk_info->lok_fin_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path,
		    iqk_info->iqk_tx_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path,
		    iqk_info->iqk_rx_fail[0][path]);

	/* Expose each fail flag in R_IQKINF, shifted into this path's nibble. */
	flag = iqk_info->lok_cor_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag);
	flag = iqk_info->lok_fin_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag);
	flag = iqk_info->iqk_tx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag);
	flag = iqk_info->iqk_rx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag);

	/* Back up the final hardware coefficients for later restore. */
	tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
	iqk_info->bp_iqkenable[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_txkresult[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_rxkresult[path] = tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT,
			       iqk_info->iqk_times);

	/* Bump the failure counter if any fail bit is set for this path. */
	tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4));
	if (tmp != 0x0)
		iqk_info->iqk_fail_cnt++;
	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4),
			       iqk_info->iqk_fail_cnt);
}
1272
/* Full calibration sequence for one path: TX setup + LOK, then TX IQK,
 * then RX setup + RX IQK, finally publish the results.  Narrow-band or
 * wide-band variants are chosen by iqk_info->is_nbiqk.
 */
static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool nb = iqk_info->is_nbiqk;

	/* TX side: LO leakage calibration first, then TX IQ calibration. */
	_iqk_txk_setting(rtwdev, path);
	iqk_info->lok_fail[path] = _iqk_lok(rtwdev, phy_idx, path);

	iqk_info->iqk_tx_fail[0][path] = nb ?
					 _iqk_nbtxk(rtwdev, phy_idx, path) :
					 _txk_group_sel(rtwdev, phy_idx, path);

	/* RX side. */
	_iqk_rxk_setting(rtwdev, path);
	iqk_info->iqk_rx_fail[0][path] = nb ?
					 _iqk_nbrxk(rtwdev, phy_idx, path) :
					 _rxk_group_sel(rtwdev, phy_idx, path);

	_iqk_info_iqk(rtwdev, phy_idx, path);
}
1293
/* Capture the current channel context (band/bandwidth/channel) for one
 * path, pick the synthesizer selection for DBCC, and mirror the channel
 * info plus version numbers into the IQK info registers.
 */
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
			     enum rtw89_phy_idx phy, u8 path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	iqk_info->iqk_band[path] = chan->band_type;
	iqk_info->iqk_bw[path] = chan->band_width;
	iqk_info->iqk_ch[path] = chan->channel;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
		    iqk_info->iqk_band[path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n",
		    path, iqk_info->iqk_bw[path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n",
		    path, iqk_info->iqk_ch[path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy,
		    rtwdev->dbcc_en ? "on" : "off",
		    iqk_info->iqk_band[path] == 0 ? "2G" :
		    iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
		    iqk_info->iqk_ch[path],
		    iqk_info->iqk_bw[path] == 0 ? "20M" :
		    iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");
	/* syn1to2 is OR-ed into the RR_MOD word in _iqk_txk_setting();
	 * DBCC mode selects a different synthesizer configuration.
	 */
	if (!rtwdev->dbcc_en)
		iqk_info->syn1to2 = 0x1;
	else
		iqk_info->syn1to2 = 0x3;

	/* Mirror channel/version info into the (path * 16)-shifted fields. */
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852C_IQK_VER);
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16),
			       iqk_info->iqk_band[path]);
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16),
			       iqk_info->iqk_bw[path]);
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16),
			       iqk_info->iqk_ch[path]);

	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_NCTLV, RTW8852C_NCTL_VER);
}
1336
/* Kick off the calibration for one path; currently just delegates to
 * _iqk_by_path() (thin wrapper, presumably kept for symmetry with other
 * chips' multi-step start routines).
 */
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			   u8 path)
{
	_iqk_by_path(rtwdev, phy_idx, path);
}
1342
/* Write the saved CFIR coefficients back to hardware and return the
 * NCTL/KIP engines and RF path to their normal operating state.
 */
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;

	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_txcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_rxcfir[path]);
	/* One-shot restore command: base 0x00001219, path in bits [7:4]. */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
			       0x00001219 + (path << 4));
	fsleep(200);
	fail = _iqk_check_cal(rtwdev, path, 0x12);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail = %x\n", fail);

	/* Reset the NCTL engine and KIP system config. */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);

	/* Drop LOK write-enable, go back to RX mode, release RF reset. */
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
}
1366
/* Restore the AFE/BB settings changed by _iqk_macbb_setting() using the
 * per-path register table, then re-enable the RX AGC.
 */
static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852c_iqk_afebb_restore_defs_a_tbl,
				 &rtw8852c_iqk_afebb_restore_defs_b_tbl);

	/* 0x1 re-enables AGC (inverse of the 0x0 used during setup). */
	rtw8852c_disable_rxagc(rtwdev, path, 0x1);
}
1376
/* Pre-configure one path for calibration: select the MCC table entry in
 * the coefficient/CFIR LUT registers, hold the RF in its calibration
 * state and program the NCTL/KIP engines.
 */
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
	u8 idx = mcc_info->table_idx;

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
}
1389
/* Configure the MAC/BB/AFE for calibration on one path: hold the ADC
 * FIFO in reset, disable AGC/DPD, force DAC/ADC clocks, program the
 * analog front end, then release the FIFO resets.  The ordering is the
 * hardware bring-up sequence - do not reorder.
 */
static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__);

	/* 01_BB_AFE_for DPK_S0_20210820 */
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
	/* Keep the ADC FIFO in reset while reconfiguring clocks. */
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);

	/* disable rxgac */
	rtw8852c_disable_rxagc(rtwdev, path, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), MASKDWORD, 0xf801fffd);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_DIS, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DAC_VAL, 0x1);

	/* Force DAC to 960 MHz and ADC to 1920 MHz for the calibration. */
	rtw8852c_txck_force(rtwdev, path, true, DAC_960M);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_GDIS, 0x1);

	rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_ACK_VAL, 0x2);

	/* Analog front-end setup; the PW15/ANAPAR pairs are two-step
	 * power-up writes.
	 */
	rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW | (path << 13), B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
	/* Release the ADC FIFO resets. */
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);
}
1424
/* RC calibration (RCK) for one RF path: trigger the hardware RCK,
 * poll for completion and write the resulting CA code back.
 */
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5, rck_val = 0;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	/* Save RR_RSV1 so the reset control can be restored afterwards. */
	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

	/* Poll RF reg 0x1c bit 3 as the done flag (up to 20 us). */
	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
				       false, rtwdev, path, 0x1c, BIT(3));
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");

	/* Latch the calibrated CA code back into RR_RCKC. */
	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RCK] RF 0x1b / 0x1c = 0x%x / 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK));
}
1459
_iqk_init(struct rtw89_dev * rtwdev)1460 static void _iqk_init(struct rtw89_dev *rtwdev)
1461 {
1462 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1463 u8 ch, path;
1464
1465 rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
1466 if (iqk_info->is_iqk_init)
1467 return;
1468
1469 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1470 iqk_info->is_iqk_init = true;
1471 iqk_info->is_nbiqk = false;
1472 iqk_info->iqk_fft_en = false;
1473 iqk_info->iqk_sram_en = false;
1474 iqk_info->iqk_cfir_en = false;
1475 iqk_info->iqk_xym_en = false;
1476 iqk_info->thermal_rek_en = false;
1477 iqk_info->iqk_times = 0x0;
1478
1479 for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
1480 iqk_info->iqk_channel[ch] = 0x0;
1481 for (path = 0; path < RTW8852C_IQK_SS; path++) {
1482 iqk_info->lok_cor_fail[ch][path] = false;
1483 iqk_info->lok_fin_fail[ch][path] = false;
1484 iqk_info->iqk_tx_fail[ch][path] = false;
1485 iqk_info->iqk_rx_fail[ch][path] = false;
1486 iqk_info->iqk_mcc_ch[ch][path] = 0x0;
1487 iqk_info->iqk_table_idx[path] = 0x0;
1488 }
1489 }
1490 }
1491
/* Run a full IQ calibration on one path: notify BTC, back up BB/RF
 * registers, run macbb setup -> preset -> calibration -> restore, then
 * put everything back and notify BTC again.
 */
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR];
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);

	/* Tell the BT coexistence core a one-shot RF calibration starts. */
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]==========IQK strat!!!!!==========\n");
	iqk_info->iqk_times++;
	iqk_info->kcount = 0;
	iqk_info->version = RTW8852C_IQK_VER;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
	_iqk_get_ch_info(rtwdev, phy_idx, path);
	/* Back up registers that the calibration clobbers, run the
	 * calibration sequence, then restore in reverse order.
	 */
	_rfk_backup_bb_reg(rtwdev, backup_bb_val);
	_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_restore_bb_reg(rtwdev, backup_bb_val);
	_rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
1521
/* Dispatch the IQ calibration to the path(s) owned by this PHY.
 * For RF_AB, path A is calibrated before path B.
 */
static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
{
	u8 kpath = _kpath(rtwdev, phy_idx);

	if (kpath == RF_A || kpath == RF_AB)
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);

	if (kpath == RF_B || kpath == RF_AB)
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
}
1539
/* Pulse the RX DC-offset calibration trigger on one path and wait for
 * the hardware done flag (up to 2 ms), then de-assert the trigger.
 */
static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path)
{
	int ret;
	u32 val;

	/* 0 -> 1 edge starts the calibration. */
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);

	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val,
				       2, 2000, false, rtwdev, path,
				       RR_DCK1, RR_DCK1_DONE);
	if (ret)
		rtw89_warn(rtwdev, "[RX_DCK] S%d RXDCK timeout\n", path);
	else
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] S%d RXDCK finish\n", path);

	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
}
1558
/* Run RX DC-offset calibration on one path.  If the check register says
 * a retry is needed and the done code is above 1, re-run once with the
 * read-back IDAC value before settling on IDAC = 1.
 */
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
			bool is_afe)
{
	u8 res;

	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);

	_rx_dck_toggle(rtwdev, path);
	/* RR_DCKC_CHK == 0: first pass is good enough, no retry needed. */
	if (rtw89_read_rf(rtwdev, path, RR_DCKC, RR_DCKC_CHK) == 0)
		return;
	res = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_DONE);
	if (res > 1) {
		/* Retry with the measured IDAC code, then restore to 1. */
		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, res);
		_rx_dck_toggle(rtwdev, path);
		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, 0x1);
	}
}
1576
/* ---- DPK (digital pre-distortion calibration) definitions ---- */
#define RTW8852C_RF_REL_VERSION 34
#define RTW8852C_DPK_VER 0x10
#define RTW8852C_DPK_TH_AVG_NUM 4	/* samples averaged for thermal tracking */
#define RTW8852C_DPK_RF_PATH 2		/* number of RF paths covered by DPK */
#define RTW8852C_DPK_KIP_REG_NUM 5	/* KIP registers backed up per path */
#define RTW8852C_DPK_RXSRAM_DBG 0	/* compile-time RX-SRAM debug switch */

/* One-shot command IDs written to R_NCTL_CFG by _dpk_one_shot().
 * IDs >= 0x28 appear to be the "D_"-prefixed newer command set - the
 * exact firmware semantics are not visible here.
 */
enum rtw8852c_dpk_id {
	LBK_RXIQK	= 0x06,
	SYNC		= 0x10,
	MDPK_IDL	= 0x11,
	MDPK_MPA	= 0x12,
	GAIN_LOSS	= 0x13,
	GAIN_CAL	= 0x14,
	DPK_RXAGC	= 0x15,
	KIP_PRESET	= 0x16,
	KIP_RESTORE	= 0x17,
	DPK_TXAGC	= 0x19,
	D_KIP_PRESET	= 0x28,
	D_TXAGC		= 0x29,
	D_RXAGC		= 0x2a,
	D_SYNC		= 0x2b,
	D_GAIN_LOSS	= 0x2c,
	D_MDPK_IDL	= 0x2d,
	D_GAIN_NORM	= 0x2f,
	D_KIP_THERMAL	= 0x30,
	D_KIP_RESTORE	= 0x31
};

/* TX AGC index window used during the DPK gain search. */
#define DPK_TXAGC_LOWER 0x2e
#define DPK_TXAGC_UPPER 0x3f
#define DPK_TXAGC_INVAL 0xff	/* sentinel: no valid TXAGC found */

/* States of the DPK automatic gain control search loop. */
enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,
	DPK_AGC_STEP_GAIN_LOSS_IDX,
	DPK_AGC_STEP_GL_GT_CRITERION,
	DPK_AGC_STEP_GL_LT_CRITERION,
	DPK_AGC_STEP_SET_TX_GAIN,
};
1617
/* Select the RF control source for one path: controlled by the BB
 * (is_bybb == true) or directly (is_bybb == false).
 */
static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path, bool is_bybb)
{
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, is_bybb ? 0x1 : 0x0);
}
1626
1627 static void _dpk_onoff(struct rtw89_dev *rtwdev,
1628 enum rtw89_rf_path path, bool off);
1629
/* Snapshot the per-path KIP registers listed in @reg into @reg_bkup
 * before DPK modifies them; _dpk_reload_kip() is the inverse.
 */
static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[],
			  u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path)
{
	u32 addr;
	u8 idx;

	for (idx = 0; idx < RTW8852C_DPK_KIP_REG_NUM; idx++) {
		addr = reg[idx] + (path << 8);
		reg_bkup[path][idx] = rtw89_phy_read32_mask(rtwdev, addr,
							    MASKDWORD);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
			    addr, reg_bkup[path][idx]);
	}
}
1643
/* Write the values saved by _dpk_bkup_kip() back into the per-path KIP
 * registers, restoring the pre-DPK state.
 */
static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[],
			    u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path)
{
	u32 addr;
	u8 idx;

	for (idx = 0; idx < RTW8852C_DPK_KIP_REG_NUM; idx++) {
		addr = reg[idx] + (path << 8);
		rtw89_phy_write32_mask(rtwdev, addr, MASKDWORD,
				       reg_bkup[path][idx]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
			    addr, reg_bkup[path][idx]);
	}
}
1656
/* Issue one DPK command to the NCTL engine and wait for completion.
 *
 * The command word encodes the ID in the high byte and a per-path
 * opcode (0x19 + path * 0x12) in the low byte.  Completion is signalled
 * by 0xbff8 reading 0x55 (polled up to 20 ms).  Returns 1 on timeout,
 * 0 on success.
 */
static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, enum rtw8852c_dpk_id id)
{
	u16 dpk_cmd;
	u32 val;
	int ret;

	dpk_cmd = (u16)((id << 8) | (0x19 + path * 0x12));

	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       10, 20000, false, rtwdev, 0xbff8, MASKBYTE0);
	/* Extra settle delay after the poll, then ack/clear the engine. */
	mdelay(10);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] one-shot for %s = 0x%x (ret=%d)\n",
		    id == 0x06 ? "LBK_RXIQK" :
		    id == 0x10 ? "SYNC" :
		    id == 0x11 ? "MDPK_IDL" :
		    id == 0x12 ? "MDPK_MPA" :
		    id == 0x13 ? "GAIN_LOSS" : "PWR_CAL",
		    dpk_cmd, ret);

	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] one-shot over 20ms!!!!\n");
		return 1;
	}

	return 0;
}
1690
/* Capture the current channel context (band/channel/bandwidth) into the
 * DPK bookkeeping for this path's active table index, and log it.
 */
static void _dpk_information(struct rtw89_dev *rtwdev,
			     enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].band = chan->band_type;
	dpk->bp[path][kidx].ch = chan->channel;
	dpk->bp[path][kidx].bw = chan->band_width;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
		    path, dpk->cur_idx[path], phy,
		    rtwdev->is_tssi_mode[path] ? "on" : "off",
		    rtwdev->dbcc_en ? "on" : "off",
		    dpk->bp[path][kidx].band == 0 ? "2G" :
		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
		    dpk->bp[path][kidx].ch,
		    dpk->bp[path][kidx].bw == 0 ? "20M" :
		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
}
1715
/* Prepare BB/AFE for DPK: hold the ADC FIFO in reset, switch BB to IQK
 * debug mode, force DAC/ADC clocks, then release the FIFO reset.
 * The numbered steps mirror the vendor calibration flow; their order matters.
 */
static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	/*1. Keep ADC_fifo reset*/
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);

	/*2. BB for IQK DBG mode*/
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0xd801dffd);

	/*3.Set DAC clk*/
	rtw8852c_txck_force(rtwdev, path, true, DAC_960M);

	/*4. Set ADC clk*/
	rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
	rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
			       B_P0_NRBW_DBG, 0x1);
	/* Analog parameter toggle sequence — values taken from vendor flow. */
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041);

	/*5. ADDA fifo rst*/
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE setting\n", path);
}
1749
/* Undo _dpk_bb_afe_setting(): leave BB debug mode, release forced clocks,
 * and cycle the ADC FIFO reset bits back to their idle state.
 */
static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
			       B_P0_NRBW_DBG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
	/* Clear the IQK debug-mode word written during setup. */
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), B_P0_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE restore\n", path);
}
1765
/* Pause (or resume) per-path TSSI tracking while DPK is in progress. */
static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
			    enum rtw89_rf_path path, bool is_pause)
{
	const char *state = is_pause ? "pause" : "resume";

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
			       B_P0_TSSI_TRK_EN, is_pause ? 0x1 : 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path, state);
}
1775
/* Hand RF-register control to the KIP engine, or return it to BB. */
static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev, u8 path, bool ctrl_by_kip)
{
	const char *owner = ctrl_by_kip ? "KIP" : "BB";

	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_IQK_RFC_ON,
			       ctrl_by_kip ? 0x1 : 0x0);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RFC is controlled by %s\n",
		    owner);
}
1782
/* Force (or release) the BB-side TX power override for this path. */
static void _dpk_txpwr_bb_force(struct rtw89_dev *rtwdev, u8 path, bool force)
{
	u32 offs = path << 13;

	rtw89_phy_write32_mask(rtwdev, R_TXPWRB + offs, B_TXPWRB_ON, force);
	rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H + offs, B_TXPWRB_RDY, force);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d txpwr_bb_force %s\n",
		    path, force ? "on" : "off");
}
1791
/* Restore KIP state after DPK: run the KIP-restore one-shot, then return RF
 * control to BB and drop the forced BB TX power.
 */
static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	_dpk_one_shot(rtwdev, phy, path, D_KIP_RESTORE);
	_dpk_kip_control_rfc(rtwdev, path, false);
	_dpk_txpwr_bb_force(rtwdev, path, false);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
}
1800
/* Run a loopback RX IQ calibration for DPK.
 *
 * The current RXBB gain, RF reg 0x11 (TX injection gain) and the KIP IQ-path
 * switch are saved up front and restored afterwards, so only the loopback
 * one-shot's RXIQC result persists.
 */
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path)
{
#define RX_TONE_IDX 0x00250025 /* Q.2 9.25MHz */
	u8 cur_rxbb;
	u32 rf_11, reg_81cc;

	rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);

	/* RF writes below need BB control, not KIP. */
	_dpk_kip_control_rfc(rtwdev, path, false);

	/* Save state to be restored at the end. */
	cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
	rf_11 = rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK);
	reg_81cc = rtw89_phy_read32_mask(rtwdev, R_KIP_IQP + (path << 8),
					 B_KIP_IQP_SW);

	/* Configure loopback injection gain and max RXBB gain. */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x3);
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0xd);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, 0x1f);

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x12);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, 0x3);

	_dpk_kip_control_rfc(rtwdev, path, true);

	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, MASKDWORD, RX_TONE_IDX);

	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
		    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD));

	/* Restore saved RF/KIP state. */
	_dpk_kip_control_rfc(rtwdev, path, false);

	rtw89_write_rf(rtwdev, path, RR_TXIG, RFREG_MASK, rf_11);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, cur_rxbb);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, reg_81cc);

	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);

	_dpk_kip_control_rfc(rtwdev, path, true);
}
1848
/* Program the per-band RF front-end for DPK.
 *
 * 2 GHz and 5/6 GHz use different RR_MOD base words; both end up in DPK mode
 * with the TIA debug override enabled. For 6 GHz channels >= 161 an extra
 * IQ-generator bias is applied.
 */
static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
			    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		/* BIT(dbcc_en): bit 0 vs bit 1 of the mode word depends on DBCC. */
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x50121 | BIT(rtwdev->dbcc_en));
		rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x2);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] RF 0x0/0x83/0x9e/0x1a/0xdf/0x1001a = 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x\n",
			    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
			    rtw89_read_rf(rtwdev, path, RR_RXBB, RFREG_MASK),
			    rtw89_read_rf(rtwdev, path, RR_TIA, RFREG_MASK),
			    rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK),
			    rtw89_read_rf(rtwdev, path, RR_LUTDBG, RFREG_MASK),
			    rtw89_read_rf(rtwdev, path, 0x1001a, RFREG_MASK));
	} else {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x50101 | BIT(rtwdev->dbcc_en));
		rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);

		/* Only the IQ-generator bias is conditional on high 6G channels;
		 * the LOGEN report value is written in either case (the original
		 * if/else duplicated the RR_LOGEN write in both branches).
		 */
		if (dpk->bp[path][kidx].band == RTW89_BAND_6G &&
		    dpk->bp[path][kidx].ch >= 161)
			rtw89_write_rf(rtwdev, path, RR_IQGEN, RR_IQGEN_BIAS, 0x8);

		rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);

		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_ATT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT2, 0x3);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);

		if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160)
			rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_EBW, 0x0);
	}
}
1892
/* Select the tone-pattern generator mode/period matching the channel BW. */
static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	const char *bw_str;

	switch (dpk->bp[path][kidx].bw) {
	case RTW89_CHANNEL_WIDTH_160:
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0x0180ff30);
		bw_str = "160M";
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xffe0fa00);
		bw_str = "80M";
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xff4009e0);
		bw_str = "40M";
		break;
	default:
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xf9f007d0);
		bw_str = "20M";
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n", bw_str);
}
1915
/* Read back the SYNC stage report (correlation and DC offset) and store it
 * in the DPK info block.
 *
 * Returns true when sync FAILED (DC offset or correlation out of threshold),
 * false on success.
 */
static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
#define DPK_SYNC_TH_DC_I 200
#define DPK_SYNC_TH_DC_Q 200
#define DPK_SYNC_TH_CORR 170
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u16 dc_i, dc_q;
	u8 corr_val, corr_idx, rxbb;
	u8 rxbb_ov;

	/* Report select 0x0: correlation index/value. */
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);

	corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
	corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);

	dpk->corr_idx[path][kidx] = corr_idx;
	dpk->corr_val[path][kidx] = corr_val;

	/* Report select 0x9: DC offset I/Q, 12-bit signed. */
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);

	dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
	dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);

	dc_i = abs(sign_extend32(dc_i, 11));
	dc_q = abs(sign_extend32(dc_q, 11));

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d Corr_idx/ Corr_val /DC I/Q, = %d / %d / %d / %d\n",
		    path, corr_idx, corr_val, dc_i, dc_q);

	dpk->dc_i[path][kidx] = dc_i;
	dpk->dc_q[path][kidx] = dc_q;

	/* Report select 0x8: final RXBB gain chosen by RX AGC. */
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x8);
	rxbb = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB);

	/* Report select 0x31: RXBB over-limit flag. */
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x31);
	rxbb_ov = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXOV);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d RXBB/ RXAGC_done /RXBB_ovlmt = %d / %d / %d\n",
		    path, rxbb,
		    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DONE),
		    rxbb_ov);

	if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
	    corr_val < DPK_SYNC_TH_CORR)
		return true;
	else
		return false;
}
1967
_dpk_dgain_read(struct rtw89_dev * rtwdev)1968 static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
1969 {
1970 u16 dgain = 0x0;
1971
1972 rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
1973
1974 dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
1975
1976 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain, dgain);
1977
1978 return dgain;
1979 }
1980
_dpk_gainloss_read(struct rtw89_dev * rtwdev)1981 static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
1982 {
1983 u8 result;
1984
1985 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
1986 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
1987
1988 result = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
1989
1990 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp GL = %d\n", result);
1991
1992 return result;
1993 }
1994
/* Query which K-set the KIP engine selected and cache it in dpk->cur_k_set. */
static void _dpk_kset_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0x10);
	/* Bits [31:29] hold the 1-based k-set; store it 0-based.
	 * NOTE(review): raw 0xE0000000 mask — presumably a named B_RPT_PER_*
	 * field exists; confirm against reg.h.
	 */
	dpk->cur_k_set =
		rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 0xE0000000) - 1;
}
2003
/* Apply a TX AGC value for DPK and trigger the TXAGC one-shot.
 *
 * When @set_from_bb is true, @dbm (clamped to 7..24 dBm) is programmed into
 * the BB TX power override first; otherwise only the one-shot runs. Either
 * way the resulting k-set selection is read back afterwards.
 */
static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path, u8 dbm, bool set_from_bb)
{
	if (set_from_bb) {
		dbm = clamp_t(u8, dbm, 7, 24);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set S%d txagc to %ddBm\n", path, dbm);
		/* Register field is in 0.25 dB units, hence dbm << 2. */
		rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_VAL, dbm << 2);
	}
	_dpk_one_shot(rtwdev, phy, path, D_TXAGC);
	_dpk_kset_query(rtwdev, path);
}
2015
/* Run the gain-loss one-shot and return the measured gain-loss index.
 * @kidx is currently unused here; kept for signature symmetry with the
 * other per-stage helpers.
 */
static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, D_GAIN_LOSS);
	/* 0xff with set_from_bb=false: only re-trigger TXAGC, no BB write. */
	_dpk_kip_set_txagc(rtwdev, phy, path, 0xff, false);

	rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A1, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A0, 0x0);

	return _dpk_gainloss_read(rtwdev);
}
2027
/* Inspect the PA scan buffer.
 *
 * With @is_check true: sample entry 0x00 and entry 0x1f, and return true when
 * |entry0|^2 >= 1.6 * |entry31|^2 (power not decaying as expected).
 * With @is_check false: just dump all 32 entries to the debug log; the final
 * comparison then runs on all-zero values and trivially returns true, so the
 * return value is only meaningful for is_check == true.
 */
static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
{
	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);

	if (is_check) {
		/* Entry 0: I in the high half-word, Q in the low, 12-bit signed. */
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val1_i = abs(sign_extend32(val1_i, 11));
		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val1_q = abs(sign_extend32(val1_q, 11));

		/* Entry 31 (last). */
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val2_i = abs(sign_extend32(val2_i, 11));
		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val2_q = abs(sign_extend32(val2_q, 11));

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
			    phy_div(val1_i * val1_i + val1_q * val1_q,
				    val2_i * val2_i + val2_q * val2_q));
	} else {
		for (i = 0; i < 32; i++) {
			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
		}
	}

	if (val1_i * val1_i + val1_q * val1_q >= (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
		return true;
	else
		return false;
}
2066
_dpk_kip_set_rxagc(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,u8 kidx)2067 static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2068 enum rtw89_rf_path path, u8 kidx)
2069 {
2070 _dpk_one_shot(rtwdev, phy, path, D_RXAGC);
2071
2072 return _dpk_sync_check(rtwdev, path, kidx);
2073 }
2074
_dpk_read_rxsram(struct rtw89_dev * rtwdev)2075 static void _dpk_read_rxsram(struct rtw89_dev *rtwdev)
2076 {
2077 u32 addr;
2078
2079 rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_pre_defs_tbl);
2080
2081 for (addr = 0; addr < 0x200; addr++) {
2082 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 | addr);
2083
2084 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RXSRAM[%03d] = 0x%07x\n", addr,
2085 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
2086 }
2087
2088 rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_post_defs_tbl);
2089 }
2090
_dpk_bypass_rxiqc(struct rtw89_dev * rtwdev,enum rtw89_rf_path path)2091 static void _dpk_bypass_rxiqc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
2092 {
2093 rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1);
2094 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000002);
2095
2096 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Bypass RXIQC\n");
2097 }
2098
/* DPK AGC state machine.
 *
 * Iterates SYNC/DGAIN -> GAIN_LOSS -> (raise/lower TX power, or commit RX
 * gain) until the gain-loss index lands in range, the TX power hits a bound,
 * or the retry budget (6 AGC rounds / 200 loop iterations) is exhausted.
 *
 * Returns nonzero when the RX AGC/sync step failed.
 */
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_rf_path path, u8 kidx, u8 init_xdbm, u8 loss_only)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0;
	u8 tmp_rxbb;
	u8 goout = 0, agc_cnt = 0;
	u16 dgain = 0;
	bool is_fail = false;
	int limit = 200;	/* hard cap against a stuck state machine */

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			is_fail = _dpk_kip_set_rxagc(rtwdev, phy, path, kidx);

			if (RTW8852C_DPK_RXSRAM_DBG)
				_dpk_read_rxsram(rtwdev);

			if (is_fail) {
				goout = 1;
				break;
			}

			dgain = _dpk_dgain_read(rtwdev);

			/* Re-sync once if dgain is outside 0x556..0x5fc. */
			if (dgain > 0x5fc || dgain < 0x556) {
				_dpk_one_shot(rtwdev, phy, path, D_SYNC);
				dgain = _dpk_dgain_read(rtwdev);
			}

			/* First round only: set up the RX IQ correction path. */
			if (agc_cnt == 0) {
				if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
					_dpk_bypass_rxiqc(rtwdev, path);
				else
					_dpk_lbk_rxiqk(rtwdev, phy, path);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			break;

		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx);

			/* gl >= 7, or gl == 0 with a bad PA scan -> too hot;
			 * gl == 0 otherwise -> too cold; else commit RX gain.
			 */
			if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
			    tmp_gl_idx >= 7)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else
				step = DPK_AGC_STEP_SET_TX_GAIN;
			break;

		case DPK_AGC_STEP_GL_GT_CRITERION:
			/* Gain loss too high: back TX power off by 3 dB (floor 7). */
			if (tmp_dbm <= 7) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@lower bound!!\n");
			} else {
				tmp_dbm = max_t(u8, tmp_dbm - 3, 7);
				_dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true);
			}
			step = DPK_AGC_STEP_SYNC_DGAIN;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_GL_LT_CRITERION:
			/* Gain loss too low: raise TX power by 2 dB (cap 24). */
			if (tmp_dbm >= 24) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@upper bound!!\n");
			} else {
				tmp_dbm = min_t(u8, tmp_dbm + 2, 24);
				_dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true);
			}
			step = DPK_AGC_STEP_SYNC_DGAIN;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_SET_TX_GAIN:
			/* Fold the gain-loss index into RXBB gain (cap 0x1f)
			 * and finish.
			 */
			_dpk_kip_control_rfc(rtwdev, path, false);
			tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
			if (tmp_rxbb + tmp_gl_idx > 0x1f)
				tmp_rxbb = 0x1f;
			else
				tmp_rxbb = tmp_rxbb + tmp_gl_idx;

			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust RXBB (%+d) = 0x%x\n",
				    tmp_gl_idx, tmp_rxbb);
			_dpk_kip_control_rfc(rtwdev, path, true);
			goout = 1;
			break;
		default:
			goout = 1;
			break;
		}
	} while (!goout && agc_cnt < 6 && --limit > 0);

	if (limit <= 0)
		rtw89_warn(rtwdev, "[DPK] exceed loop limit\n");

	return is_fail;
}
2201
/* Load the register table for the requested MDPD order (0..3). */
static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
{
	static const struct rtw89_rfk_tbl *order_tbls[] = {
		&rtw8852c_dpk_mdpd_order0_defs_tbl,
		&rtw8852c_dpk_mdpd_order1_defs_tbl,
		&rtw8852c_dpk_mdpd_order2_defs_tbl,
		&rtw8852c_dpk_mdpd_order3_defs_tbl,
	};
	static const char * const order_names[] = {
		"(5,3,1)", "(5,3,0)", "(5,0,0)", "(7,3,1)",
	};

	if (order >= ARRAY_SIZE(order_tbls)) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Wrong MDPD order!!(0x%x)\n", order);
		return;
	}

	rtw89_rfk_parser(rtwdev, order_tbls[order]);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n",
		    order_names[order]);
}
2223
/* Run the MDPK IDL stage.
 *
 * Chooses the MDPD order from hardware hint bits (or, failing that, from the
 * channel bandwidth), runs the IDL one-shot, and retries up to 5 extra times
 * while the sync-error/overflow flag stays set; a final fallback drops to
 * order 2 before one last attempt.
 */
static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 cnt;
	u8 ov_flag;
	u32 dpk_sync;

	rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_MA, 0x1);

	/* Hardware hint bits take precedence; otherwise derive from BW. */
	if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T2) == 0x1)
		_dpk_set_mdpd_para(rtwdev, 0x2);
	else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T1) == 0x1)
		_dpk_set_mdpd_para(rtwdev, 0x1);
	else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T0) == 0x1)
		_dpk_set_mdpd_para(rtwdev, 0x0);
	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_5 ||
		 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_10 ||
		 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_20)
		_dpk_set_mdpd_para(rtwdev, 0x2);
	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ||
		 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
		_dpk_set_mdpd_para(rtwdev, 0x1);
	else
		_dpk_set_mdpd_para(rtwdev, 0x0);

	rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL, 0x0);
	fsleep(1000);	/* settle before triggering IDL */

	_dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
	dpk_sync = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] dpk_sync = 0x%x\n", dpk_sync);

	/* Report select 0xf: sync-error / overflow flag. */
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf);
	ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
	for (cnt = 0; cnt < 5 && ov_flag == 0x1; cnt++) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ReK due to MDPK ov!!!\n");
		_dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
		rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf);
		ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
	}

	/* Still overflowing: fall back to order 2 and try once more. */
	if (ov_flag) {
		_dpk_set_mdpd_para(rtwdev, 0x2);
		_dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
	}
}
2272
_dpk_reload_check(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2273 static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2274 enum rtw89_rf_path path)
2275 {
2276 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2277 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2278 bool is_reload = false;
2279 u8 idx, cur_band, cur_ch;
2280
2281 cur_band = chan->band_type;
2282 cur_ch = chan->channel;
2283
2284 for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
2285 if (cur_band != dpk->bp[path][idx].band ||
2286 cur_ch != dpk->bp[path][idx].ch)
2287 continue;
2288
2289 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
2290 B_COEF_SEL_MDPD, idx);
2291 dpk->cur_idx[path] = idx;
2292 is_reload = true;
2293 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2294 "[DPK] reload S%d[%d] success\n", path, idx);
2295 }
2296
2297 return is_reload;
2298 }
2299
/* Enable or disable KIP power/clock via the corresponding register table. */
static void _dpk_kip_pwr_clk_onoff(struct rtw89_dev *rtwdev, bool turn_on)
{
	if (turn_on)
		rtw89_rfk_parser(rtwdev, &rtw8852c_dpk_kip_pwr_clk_on_defs_tbl);
	else
		rtw89_rfk_parser(rtwdev, &rtw8852c_dpk_kip_pwr_clk_off_defs_tbl);
}
2305
/* Preset the KIP engine: mirror the RF mode word, pick the DPD coefficient
 * select (CAV silicon uses a different value), and run the preset one-shot.
 */
static void _dpk_kip_preset_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				  enum rtw89_rf_path path, u8 kidx)
{
	u32 dpd_sel = rtwdev->hal.cv == CHIP_CAV ? 0x01 : 0x0c;

	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
			       rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	rtw89_phy_write32_mask(rtwdev,
			       R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_SEL, dpd_sel);

	_dpk_kip_control_rfc(rtwdev, path, true);
	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx);

	_dpk_one_shot(rtwdev, phy, path, D_KIP_PRESET);
}
2326
/* Read back the DPK result word for the selected k-set and extract the
 * TX AGC and thermal snapshot fields into the backup slot.
 */
static void _dpk_para_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
#define _DPK_PARA_TXAGC GENMASK(15, 10)
#define _DPK_PARA_THER GENMASK(31, 26)
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u32 para;

	para = rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
				     MASKDWORD);

	dpk->bp[path][kidx].txagc_dpk = FIELD_GET(_DPK_PARA_TXAGC, para);
	dpk->bp[path][kidx].ther_dpk = FIELD_GET(_DPK_PARA_THER, para);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal/ txagc_RF (K%d) = 0x%x/ 0x%x\n",
		    dpk->cur_k_set, dpk->bp[path][kidx].ther_dpk, dpk->bp[path][kidx].txagc_dpk);
}
2343
/* Normalize the DPK gain scaler.
 *
 * When @is_execute, let hardware run the GAIN_NORM one-shot; otherwise write
 * a fixed default (0x5b) into the gain-scale field (bits [6:0]) of the
 * per-kset parameter register. The resulting gain scale is cached either way.
 */
static void _dpk_gain_normalize_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				      enum rtw89_rf_path path, u8 kidx, bool is_execute)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (is_execute) {
		rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_AG, 0x200);
		rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_EN, 0x3);

		_dpk_one_shot(rtwdev, phy, path, D_GAIN_NORM);
	} else {
		rtw89_phy_write32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
				       0x0000007F, 0x5b);
	}
	dpk->bp[path][kidx].gs =
		rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
				      0x0000007F);
}
2362
_dpk_order_convert(struct rtw89_dev * rtwdev)2363 static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
2364 {
2365 u32 val32 = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
2366 u8 val;
2367
2368 switch (val32) {
2369 case 0:
2370 val = 0x6;
2371 break;
2372 case 1:
2373 val = 0x2;
2374 break;
2375 case 2:
2376 val = 0x0;
2377 break;
2378 case 3:
2379 val = 0x7;
2380 break;
2381 default:
2382 val = 0xff;
2383 break;
2384 }
2385
2386 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);
2387
2388 return val;
2389 }
2390
/* Commit the DPK result: latch the MDPD coefficients, program the converted
 * order, enable the winning k-set, and apply the default gain normalization.
 */
static void _dpk_on(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	/* Pulse MDPD coefficient load. */
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_ORDER, _dpk_order_convert(rtwdev));

	/* mdpd_en is a one-hot mask of the selected k-set. */
	dpk->bp[path][kidx].mdpd_en = BIT(dpk->cur_k_set);
	dpk->bp[path][kidx].path_ok = true;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] path_ok = 0x%x\n",
		    path, kidx, dpk->bp[path][kidx].mdpd_en);

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_MEN, dpk->bp[path][kidx].mdpd_en);

	_dpk_gain_normalize_8852c(rtwdev, phy, path, kidx, false);
}
2412
/* Full DPK sequence for one path: RF setup, RX DCK, KIP preset, TX AGC,
 * tone-pattern select, AGC loop, then IDL/parameter readback and commit.
 *
 * Returns true when the AGC stage failed (result not committed).
 */
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 gain)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 kidx = dpk->cur_idx[path];
	u8 init_xdbm = 15;	/* starting TX power for the AGC search, dBm */
	bool is_fail;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
	_dpk_kip_control_rfc(rtwdev, path, false);
	_rf_direct_cntrl(rtwdev, path, false);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RFREG_MASK, 0x03ffd);
	_dpk_rf_setting(rtwdev, gain, path, kidx);
	_set_rx_dck(rtwdev, phy, path, false);
	_dpk_kip_pwr_clk_onoff(rtwdev, true);
	_dpk_kip_preset_8852c(rtwdev, phy, path, kidx);
	_dpk_txpwr_bb_force(rtwdev, path, true);
	_dpk_kip_set_txagc(rtwdev, phy, path, init_xdbm, true);
	_dpk_tpg_sel(rtwdev, path, kidx);

	is_fail = _dpk_agc(rtwdev, phy, path, kidx, init_xdbm, false);
	if (is_fail)
		goto _error;

	_dpk_idl_mpa(rtwdev, phy, path, kidx);
	_dpk_para_query(rtwdev, path, kidx);
	_dpk_on(rtwdev, phy, path, kidx);

_error:
	/* Common teardown: RF control back to BB, RF back to RX mode. */
	_dpk_kip_control_rfc(rtwdev, path, false);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d]_K%d %s\n", path, kidx,
		    dpk->cur_k_set, is_fail ? "need Check" : "is Success");

	return is_fail;
}
2450
/* Invalidate the path's active backup slot before a fresh calibration. */
static void _dpk_init(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	dpk->bp[path][dpk->cur_idx[path]].path_ok = false;
}
2458
/* Select BB-direct (is_bybb) or RF-internal control of the BB DC block.
 *
 * The original if/else issued the same rtw89_write_rf() call with only the
 * constant differing; bool promotes to exactly 0/1, so a single write with
 * the flag as value is equivalent.
 */
static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_bybb)
{
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, is_bybb ? 0x1 : 0x0);
}
2466
/* Run DPK across all paths in three passes: (1) back up KIP/RF state, record
 * channel info and pause TSSI; (2) configure BB/AFE and run _dpk_main() per
 * path; (3) restore KIP/RF/BB state and resume TSSI. When result reloading
 * is enabled, matching stored results are reused instead of recalibrating.
 *
 * NOTE(review): @force is currently unused in this body.
 */
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
			    enum rtw89_phy_idx phy, u8 kpath)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8};
	u32 backup_rf_val[RTW8852C_DPK_RF_PATH][BACKUP_RF_REGS_NR];
	u32 kip_bkup[RTW8852C_DPK_RF_PATH][RTW8852C_DPK_KIP_REG_NUM] = {};
	u8 path;
	bool is_fail = true, reloaded[RTW8852C_DPK_RF_PATH] = {false};

	if (dpk->is_dpk_reload_en) {
		for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
			if (!(kpath & BIT(path)))
				continue;

			reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
			/* No match but slot 0 already used: flip to the other
			 * backup slot so the previous result is kept.
			 */
			if (!reloaded[path] && dpk->bp[path][0].ch != 0)
				dpk->cur_idx[path] = !dpk->cur_idx[path];
			else
				_dpk_onoff(rtwdev, path, false);
		}
	} else {
		for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
			dpk->cur_idx[path] = 0;
	}

	/* Pass 1: backup state and record channel info. */
	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] ========= S%d[%d] DPK Init =========\n",
			    path, dpk->cur_idx[path]);
		_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
		_dpk_information(rtwdev, phy, path);
		_dpk_init(rtwdev, path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, true);
	}

	/* Pass 2: calibrate each path. */
	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] ========= S%d[%d] DPK Start =========\n",
			    path, dpk->cur_idx[path]);
		rtw8852c_disable_rxagc(rtwdev, path, 0x0);
		_dpk_drf_direct_cntrl(rtwdev, path, false);
		_dpk_bb_afe_setting(rtwdev, phy, path, kpath);
		is_fail = _dpk_main(rtwdev, phy, path, 1);
		_dpk_onoff(rtwdev, path, is_fail);
	}

	/* Pass 3: restore everything backed up in pass 1. */
	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] ========= S%d[%d] DPK Restore =========\n",
			    path, dpk->cur_idx[path]);
		_dpk_kip_restore(rtwdev, phy, path);
		_dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);
		_dpk_bb_afe_restore(rtwdev, path);
		rtw8852c_disable_rxagc(rtwdev, path, 0x1);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, false);
	}

	_dpk_kip_pwr_clk_onoff(rtwdev, false);
}
2531
_dpk_bypass_check(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy)2532 static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2533 {
2534 struct rtw89_fem_info *fem = &rtwdev->fem;
2535 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2536 u8 band = chan->band_type;
2537
2538 if (rtwdev->hal.cv == CHIP_CAV && band != RTW89_BAND_2G) {
2539 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to CAV & not 2G!!\n");
2540 return true;
2541 } else if (fem->epa_2g && band == RTW89_BAND_2G) {
2542 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
2543 return true;
2544 } else if (fem->epa_5g && band == RTW89_BAND_5G) {
2545 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
2546 return true;
2547 } else if (fem->epa_6g && band == RTW89_BAND_6G) {
2548 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
2549 return true;
2550 }
2551
2552 return false;
2553 }
2554
_dpk_force_bypass(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy)2555 static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2556 {
2557 u8 path, kpath;
2558
2559 kpath = _kpath(rtwdev, phy);
2560
2561 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
2562 if (kpath & BIT(path))
2563 _dpk_onoff(rtwdev, path, true);
2564 }
2565 }
2566
/* DPK entry point: run calibration on the paths covered by @phy, or force
 * DPK off when the bypass conditions apply, then re-run RX DCK if the RF
 * flags that it is required.
 */
static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
		    RTW8852C_DPK_VER, rtwdev->hal.cv,
		    RTW8852C_RF_REL_VERSION);

	if (!_dpk_bypass_check(rtwdev, phy))
		_dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
	else
		_dpk_force_bypass(rtwdev, phy);

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1)
		rtw8852c_rx_dck(rtwdev, phy, false);
}
2582
/* Enable or disable DPK model application (MDPD) for @path's current
 * result slot. DPK is applied only when globally enabled, not forced off,
 * and the stored result for this slot is valid.
 */
static void _dpk_onoff(struct rtw89_dev *rtwdev,
		       enum rtw89_rf_path path, bool off)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 kidx = dpk->cur_idx[path];
	bool enable = dpk->is_dpk_enable && !off;
	u8 val = 0;

	if (enable && dpk->bp[path][kidx].path_ok)
		val = dpk->bp[path][kidx].mdpd_en;

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_MEN, val);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
		    kidx, enable ? "enable" : "disable");
}
2598
/* Periodic DPK tracking: per path, compare the current thermal reading
 * against the thermal recorded at calibration time and, on CAV chips,
 * update the DPK power scaling factor (PWSF) to compensate the drift.
 */
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 path, kidx;
	u8 txagc_rf = 0;
	s8 txagc_bb = 0, txagc_bb_tp = 0, txagc_ofst = 0;
	u8 cur_ther;
	s8 delta_ther = 0;
	s16 pwsf_tssi_ofst;

	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
		kidx = dpk->cur_idx[path];
		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		/* current TX AGC: RF index plus BB value and BB temp value */
		txagc_rf =
			rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 0x0000003f);
		txagc_bb =
			rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), MASKBYTE2);
		txagc_bb_tp =
			rtw89_phy_read32_mask(rtwdev, R_TXAGC_BTP + (path << 13), B_TXAGC_BTP);

		/* report from KIP */
		rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xf);
		cur_ther =
			rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TH);
		txagc_ofst =
			rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_OF);
		pwsf_tssi_ofst =
			rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TSSI);
		/* B_RPT_PER_TSSI is a 13-bit signed field */
		pwsf_tssi_ofst = sign_extend32(pwsf_tssi_ofst, 12);

		/* NOTE(review): the KIP thermal value read just above is
		 * overwritten here by the averaged thermal - presumably the
		 * KIP read is kept only for debugging/side effect; confirm.
		 */
		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		/* only track when this path has a valid calibration and a
		 * valid thermal reading
		 */
		if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0)
			delta_ther = dpk->bp[path][kidx].ther_dpk - cur_ther;

		/* apply half of the thermal delta */
		delta_ther = delta_ther * 1 / 2;

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] extra delta_ther = %d (0x%x / 0x%x@k)\n",
			    delta_ther, cur_ther, dpk->bp[path][kidx].ther_dpk);
		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] delta_txagc = %d (0x%x / 0x%x@k)\n",
			    txagc_rf - dpk->bp[path][kidx].txagc_dpk, txagc_rf,
			    dpk->bp[path][kidx].txagc_dpk);
		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] txagc_offset / pwsf_tssi_ofst = 0x%x / %+d\n",
			    txagc_ofst, pwsf_tssi_ofst);
		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
			    txagc_bb_tp, txagc_bb);

		/* write the compensated PWSF (base 0x78 minus thermal delta)
		 * only on CAV chips, when the DPK write path is idle and a
		 * TX AGC value is present
		 */
		if (rtw89_phy_read32_mask(rtwdev, R_DPK_WR, B_DPK_WR_ST) == 0x0 &&
		    txagc_rf != 0 && rtwdev->hal.cv == CHIP_CAV) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf = 0x%x\n", 0x78 - delta_ther);

			rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
					       0x07FC0000, 0x78 - delta_ther);
		}
	}
}
2666
_tssi_set_sys(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2667 static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2668 enum rtw89_rf_path path)
2669 {
2670 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2671 enum rtw89_band band = chan->band_type;
2672
2673 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_sys_defs_tbl);
2674
2675 if (path == RF_PATH_A)
2676 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2677 &rtw8852c_tssi_sys_defs_2g_a_tbl,
2678 &rtw8852c_tssi_sys_defs_5g_a_tbl);
2679 else
2680 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2681 &rtw8852c_tssi_sys_defs_2g_b_tbl,
2682 &rtw8852c_tssi_sys_defs_5g_b_tbl);
2683 }
2684
_tssi_ini_txpwr_ctrl_bb(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2685 static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2686 enum rtw89_rf_path path)
2687 {
2688 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2689 &rtw8852c_tssi_txpwr_ctrl_bb_defs_a_tbl,
2690 &rtw8852c_tssi_txpwr_ctrl_bb_defs_b_tbl);
2691 }
2692
_tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2693 static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
2694 enum rtw89_phy_idx phy,
2695 enum rtw89_rf_path path)
2696 {
2697 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2698 &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl,
2699 &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl);
2700 }
2701
_tssi_set_dck(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2702 static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2703 enum rtw89_rf_path path)
2704 {
2705 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2706 enum rtw89_band band = chan->band_type;
2707
2708 if (path == RF_PATH_A) {
2709 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_a_tbl);
2710 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2711 &rtw8852c_tssi_dck_defs_2g_a_tbl,
2712 &rtw8852c_tssi_dck_defs_5g_a_tbl);
2713 } else {
2714 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_b_tbl);
2715 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2716 &rtw8852c_tssi_dck_defs_2g_b_tbl,
2717 &rtw8852c_tssi_dck_defs_5g_b_tbl);
2718 }
2719 }
2720
_tssi_set_bbgain_split(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2721 static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2722 enum rtw89_rf_path path)
2723 {
2724 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2725 &rtw8852c_tssi_set_bbgain_split_a_tbl,
2726 &rtw8852c_tssi_set_bbgain_split_b_tbl);
2727 }
2728
/* Program the per-path thermal-meter compensation table: pick the swing
 * delta tables for the current subband, build a 64-entry signed offset
 * table around the path's calibrated thermal value, and write it to the
 * hardware 4 bytes per register word.
 */
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
/* Pack four consecutive s8 offsets (starting at idx) into one
 * little-endian u32 register word.
 */
#define RTW8852C_TSSI_GET_VAL(ptr, idx)			\
({							\
	s8 *__ptr = (ptr);				\
	u8 __idx = (idx), __i, __v;			\
	u32 __val = 0;					\
	for (__i = 0; __i < 4; __i++) {			\
		__v = (__ptr[__idx + __i]);		\
		__val |= (__v << (8 * __i));		\
	}						\
	__val;						\
})
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u8 subband = chan->subband_type;
	const s8 *thm_up_a = NULL;
	const s8 *thm_down_a = NULL;
	const s8 *thm_up_b = NULL;
	const s8 *thm_down_b = NULL;
	u8 thermal = 0xff;
	s8 thm_ofst[64] = {0};
	u32 tmp = 0;
	u8 i, j;

	/* Select the up/down power-swing delta tables for both paths
	 * according to the current subband.
	 */
	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
		break;
	case RTW89_CH_6G_BAND_IDX0:
	case RTW89_CH_6G_BAND_IDX1:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
		break;
	case RTW89_CH_6G_BAND_IDX2:
	case RTW89_CH_6G_BAND_IDX3:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
		break;
	case RTW89_CH_6G_BAND_IDX4:
	case RTW89_CH_6G_BAND_IDX5:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
		break;
	case RTW89_CH_6G_BAND_IDX6:
	case RTW89_CH_6G_BAND_IDX7:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
		break;
	}

	if (path == RF_PATH_A) {
		thermal = tssi_info->thermal[RF_PATH_A];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);

		/* 0xff: no stored thermal for this path - presumably an
		 * "uncalibrated" sentinel; use midpoint 32 and zero offsets.
		 */
		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
					       thermal);

			/* entries 0..31: negated "down" deltas, clamped to
			 * the last table value once the table is exhausted
			 */
			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_a[i++] :
					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];

			/* entries 63..32: "up" deltas, starting from index 1,
			 * clamped likewise
			 */
			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_a[i++] :
					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];

			/* write the 64 offsets, 4 packed bytes per word */
			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, tmp);
			}
		}
		/* latch the table: toggle the ready strobe */
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);

	} else {
		/* path B: identical sequence on the P1 register set */
		thermal = tssi_info->thermal[RF_PATH_B];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_b[i++] :
					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_b[i++] :
					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, tmp);
			}
		}
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
	}
#undef RTW8852C_TSSI_GET_VAL
}
2914
_tssi_slope_cal_org(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2915 static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2916 enum rtw89_rf_path path)
2917 {
2918 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2919 enum rtw89_band band = chan->band_type;
2920
2921 if (path == RF_PATH_A) {
2922 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2923 &rtw8852c_tssi_slope_cal_org_defs_2g_a_tbl,
2924 &rtw8852c_tssi_slope_cal_org_defs_5g_a_tbl);
2925 } else {
2926 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2927 &rtw8852c_tssi_slope_cal_org_defs_2g_b_tbl,
2928 &rtw8852c_tssi_slope_cal_org_defs_5g_b_tbl);
2929 }
2930 }
2931
_tssi_set_aligk_default(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2932 static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2933 enum rtw89_rf_path path)
2934 {
2935 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2936 enum rtw89_band band = chan->band_type;
2937 const struct rtw89_rfk_tbl *tbl;
2938
2939 if (path == RF_PATH_A) {
2940 if (band == RTW89_BAND_2G)
2941 tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_a_tbl;
2942 else if (band == RTW89_BAND_6G)
2943 tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_a_tbl;
2944 else
2945 tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_a_tbl;
2946 } else {
2947 if (band == RTW89_BAND_2G)
2948 tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_b_tbl;
2949 else if (band == RTW89_BAND_6G)
2950 tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_b_tbl;
2951 else
2952 tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_b_tbl;
2953 }
2954
2955 rtw89_rfk_parser(rtwdev, tbl);
2956 }
2957
_tssi_set_slope(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2958 static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2959 enum rtw89_rf_path path)
2960 {
2961 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2962 &rtw8852c_tssi_slope_defs_a_tbl,
2963 &rtw8852c_tssi_slope_defs_b_tbl);
2964 }
2965
_tssi_run_slope(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2966 static void _tssi_run_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2967 enum rtw89_rf_path path)
2968 {
2969 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2970 &rtw8852c_tssi_run_slope_defs_a_tbl,
2971 &rtw8852c_tssi_run_slope_defs_b_tbl);
2972 }
2973
_tssi_set_track(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2974 static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2975 enum rtw89_rf_path path)
2976 {
2977 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2978 &rtw8852c_tssi_track_defs_a_tbl,
2979 &rtw8852c_tssi_track_defs_b_tbl);
2980 }
2981
_tssi_set_txagc_offset_mv_avg(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2982 static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
2983 enum rtw89_phy_idx phy,
2984 enum rtw89_rf_path path)
2985 {
2986 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2987 &rtw8852c_tssi_txagc_ofst_mv_avg_defs_a_tbl,
2988 &rtw8852c_tssi_txagc_ofst_mv_avg_defs_b_tbl);
2989 }
2990
_tssi_enable(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy)2991 static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2992 {
2993 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
2994 u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
2995
2996 if (rtwdev->dbcc_en) {
2997 if (phy == RTW89_PHY_0) {
2998 path = RF_PATH_A;
2999 path_max = RF_PATH_B;
3000 } else if (phy == RTW89_PHY_1) {
3001 path = RF_PATH_B;
3002 path_max = RF_PATH_NUM_8852C;
3003 }
3004 }
3005
3006 for (i = path; i < path_max; i++) {
3007 _tssi_set_track(rtwdev, phy, i);
3008 _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);
3009
3010 rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A,
3011 &rtw8852c_tssi_enable_defs_a_tbl,
3012 &rtw8852c_tssi_enable_defs_b_tbl);
3013
3014 tssi_info->base_thermal[i] =
3015 ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]);
3016 rtwdev->is_tssi_mode[i] = true;
3017 }
3018 }
3019
_tssi_disable(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy)3020 static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3021 {
3022 u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
3023
3024 if (rtwdev->dbcc_en) {
3025 if (phy == RTW89_PHY_0) {
3026 path = RF_PATH_A;
3027 path_max = RF_PATH_B;
3028 } else if (phy == RTW89_PHY_1) {
3029 path = RF_PATH_B;
3030 path_max = RF_PATH_NUM_8852C;
3031 }
3032 }
3033
3034 for (i = path; i < path_max; i++) {
3035 if (i == RF_PATH_A) {
3036 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_a_tbl);
3037 rtwdev->is_tssi_mode[RF_PATH_A] = false;
3038 } else if (i == RF_PATH_B) {
3039 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_b_tbl);
3040 rtwdev->is_tssi_mode[RF_PATH_B] = false;
3041 }
3042 }
3043 }
3044
_tssi_get_cck_group(struct rtw89_dev * rtwdev,u8 ch)3045 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
3046 {
3047 switch (ch) {
3048 case 1 ... 2:
3049 return 0;
3050 case 3 ... 5:
3051 return 1;
3052 case 6 ... 8:
3053 return 2;
3054 case 9 ... 11:
3055 return 3;
3056 case 12 ... 13:
3057 return 4;
3058 case 14:
3059 return 5;
3060 }
3061
3062 return 0;
3063 }
3064
/* A channel that falls between two calibrated TSSI groups is tagged with
 * BIT(31) ("extra group"); its DE is then derived from groups idx and
 * idx + 1 (see IDX1/IDX2 below and the _tssi_get_*_de() users).
 */
#define TSSI_EXTRA_GROUP_BIT (BIT(31))
#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3070
/* Map a 2G/5G channel number to its TSSI OFDM/MCS DE group index.
 * Channels lying between two calibrated groups return
 * TSSI_EXTRA_GROUP(idx), meaning "interpolate groups idx and idx + 1".
 * Unknown channels fall back to group 0.
 */
static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
3138
/* Map a 6G channel number to its TSSI OFDM/MCS DE group index.
 * Channels lying between two calibrated groups return
 * TSSI_EXTRA_GROUP(idx), meaning "interpolate groups idx and idx + 1".
 * Unknown channels fall back to group 0.
 */
static u32 _tssi_get_6g_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 5:
		return 0;
	case 6 ... 8:
		return TSSI_EXTRA_GROUP(0);
	case 9 ... 13:
		return 1;
	case 14 ... 16:
		return TSSI_EXTRA_GROUP(1);
	case 17 ... 21:
		return 2;
	case 22 ... 24:
		return TSSI_EXTRA_GROUP(2);
	case 25 ... 29:
		return 3;
	case 33 ... 37:
		return 4;
	case 38 ... 40:
		return TSSI_EXTRA_GROUP(4);
	case 41 ... 45:
		return 5;
	case 46 ... 48:
		return TSSI_EXTRA_GROUP(5);
	case 49 ... 53:
		return 6;
	case 54 ... 56:
		return TSSI_EXTRA_GROUP(6);
	case 57 ... 61:
		return 7;
	case 65 ... 69:
		return 8;
	case 70 ... 72:
		return TSSI_EXTRA_GROUP(8);
	case 73 ... 77:
		return 9;
	case 78 ... 80:
		return TSSI_EXTRA_GROUP(9);
	case 81 ... 85:
		return 10;
	case 86 ... 88:
		return TSSI_EXTRA_GROUP(10);
	case 89 ... 93:
		return 11;
	case 97 ... 101:
		return 12;
	case 102 ... 104:
		return TSSI_EXTRA_GROUP(12);
	case 105 ... 109:
		return 13;
	case 110 ... 112:
		return TSSI_EXTRA_GROUP(13);
	case 113 ... 117:
		return 14;
	case 118 ... 120:
		return TSSI_EXTRA_GROUP(14);
	case 121 ... 125:
		return 15;
	case 129 ... 133:
		return 16;
	case 134 ... 136:
		return TSSI_EXTRA_GROUP(16);
	case 137 ... 141:
		return 17;
	case 142 ... 144:
		return TSSI_EXTRA_GROUP(17);
	case 145 ... 149:
		return 18;
	case 150 ... 152:
		return TSSI_EXTRA_GROUP(18);
	case 153 ... 157:
		return 19;
	case 161 ... 165:
		return 20;
	case 166 ... 168:
		return TSSI_EXTRA_GROUP(20);
	case 169 ... 173:
		return 21;
	case 174 ... 176:
		return TSSI_EXTRA_GROUP(21);
	case 177 ... 181:
		return 22;
	case 182 ... 184:
		return TSSI_EXTRA_GROUP(22);
	case 185 ... 189:
		return 23;
	case 193 ... 197:
		return 24;
	case 198 ... 200:
		return TSSI_EXTRA_GROUP(24);
	case 201 ... 205:
		return 25;
	case 206 ... 208:
		return TSSI_EXTRA_GROUP(25);
	case 209 ... 213:
		return 26;
	case 214 ... 216:
		return TSSI_EXTRA_GROUP(26);
	case 217 ... 221:
		return 27;
	case 225 ... 229:
		return 28;
	case 230 ... 232:
		return TSSI_EXTRA_GROUP(28);
	case 233 ... 237:
		return 29;
	case 238 ... 240:
		return TSSI_EXTRA_GROUP(29);
	case 241 ... 245:
		return 30;
	case 246 ... 248:
		return TSSI_EXTRA_GROUP(30);
	case 249 ... 253:
		return 31;
	}

	return 0;
}
3258
_tssi_get_trim_group(struct rtw89_dev * rtwdev,u8 ch)3259 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3260 {
3261 switch (ch) {
3262 case 1 ... 8:
3263 return 0;
3264 case 9 ... 14:
3265 return 1;
3266 case 36 ... 48:
3267 return 2;
3268 case 49 ... 51:
3269 return TSSI_EXTRA_GROUP(2);
3270 case 52 ... 64:
3271 return 3;
3272 case 100 ... 112:
3273 return 4;
3274 case 113 ... 115:
3275 return TSSI_EXTRA_GROUP(4);
3276 case 116 ... 128:
3277 return 5;
3278 case 132 ... 144:
3279 return 6;
3280 case 149 ... 177:
3281 return 7;
3282 }
3283
3284 return 0;
3285 }
3286
/* Map a 6G channel number to its TSSI trim group index. Channels between
 * two calibrated groups return TSSI_EXTRA_GROUP(idx); unknown channels
 * fall back to group 0.
 */
static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 13:
		return 0;
	case 14 ... 16:
		return TSSI_EXTRA_GROUP(0);
	case 17 ... 29:
		return 1;
	case 33 ... 45:
		return 2;
	case 46 ... 48:
		return TSSI_EXTRA_GROUP(2);
	case 49 ... 61:
		return 3;
	case 65 ... 77:
		return 4;
	case 78 ... 80:
		return TSSI_EXTRA_GROUP(4);
	case 81 ... 93:
		return 5;
	case 97 ... 109:
		return 6;
	case 110 ... 112:
		return TSSI_EXTRA_GROUP(6);
	case 113 ... 125:
		return 7;
	case 129 ... 141:
		return 8;
	case 142 ... 144:
		return TSSI_EXTRA_GROUP(8);
	case 145 ... 157:
		return 9;
	case 161 ... 173:
		return 10;
	case 174 ... 176:
		return TSSI_EXTRA_GROUP(10);
	case 177 ... 189:
		return 11;
	case 193 ... 205:
		return 12;
	case 206 ... 208:
		return TSSI_EXTRA_GROUP(12);
	case 209 ... 221:
		return 13;
	case 225 ... 237:
		return 14;
	case 238 ... 240:
		return TSSI_EXTRA_GROUP(14);
	case 241 ... 253:
		return 15;
	}

	return 0;
}
3342
/* Look up the OFDM/MCS TSSI DE for @path on the current channel.
 * 2G/5G and 6G use separate per-band tables; for an "extra" group
 * (channel between two calibrated groups) the DE is the average of the
 * two neighbouring groups.
 */
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;
	u8 ch = chan->channel;
	u32 gidx, gidx_1st, gidx_2nd;
	s8 de_1st;
	s8 de_2nd;
	s8 val;

	if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) {
		gidx = _tssi_get_ofdm_group(rtwdev, ch);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
			    path, gidx);

		if (IS_TSSI_EXTRA_GROUP(gidx)) {
			gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
			gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
			de_1st = tssi_info->tssi_mcs[path][gidx_1st];
			de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
			/* average of the two adjacent calibrated groups */
			val = (de_1st + de_2nd) / 2;

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
				    path, val, de_1st, de_2nd);
		} else {
			val = tssi_info->tssi_mcs[path][gidx];

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
		}
	} else {
		/* 6G: same scheme, dedicated group map and table */
		gidx = _tssi_get_6g_ofdm_group(rtwdev, ch);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
			    path, gidx);

		if (IS_TSSI_EXTRA_GROUP(gidx)) {
			gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
			gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
			de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
			de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
			val = (de_1st + de_2nd) / 2;

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
				    path, val, de_1st, de_2nd);
		} else {
			val = tssi_info->tssi_6g_mcs[path][gidx];

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
		}
	}

	return val;
}
3405
/* Look up the OFDM/MCS TSSI trim DE for @path on the current channel.
 * 2G/5G and 6G use separate trim tables; for an "extra" trim group the
 * value is the average of the two neighbouring groups.
 */
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;
	u8 ch = chan->channel;
	u32 tgidx, tgidx_1st, tgidx_2nd;
	s8 tde_1st = 0;
	s8 tde_2nd = 0;
	s8 val;

	if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) {
		tgidx = _tssi_get_trim_group(rtwdev, ch);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
			    path, tgidx);

		if (IS_TSSI_EXTRA_GROUP(tgidx)) {
			tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
			tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
			tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
			tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
			/* average of the two adjacent trim groups */
			val = (tde_1st + tde_2nd) / 2;

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
				    path, val, tde_1st, tde_2nd);
		} else {
			val = tssi_info->tssi_trim[path][tgidx];

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
				    path, val);
		}
	} else {
		/* 6G: same scheme, dedicated group map and trim table */
		tgidx = _tssi_get_6g_trim_group(rtwdev, ch);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
			    path, tgidx);

		if (IS_TSSI_EXTRA_GROUP(tgidx)) {
			tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
			tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
			tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
			tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
			val = (tde_1st + tde_2nd) / 2;

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
				    path, val, tde_1st, tde_2nd);
		} else {
			val = tssi_info->tssi_trim_6g[path][tgidx];

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
				    path, val);
		}
	}

	return val;
}
3471
/* Program the TSSI DE registers from efuse-derived values for every path
 * covered by @phy: CCK DE (long/short preamble) and MCS DE for all
 * bandwidths, each combined with the per-channel trim offset.
 */
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u8 gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;
	u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	/* under DBCC each PHY owns exactly one RF path */
	if (rtwdev->dbcc_en) {
		if (phy == RTW89_PHY_0) {
			path = RF_PATH_A;
			path_max = RF_PATH_B;
		} else if (phy == RTW89_PHY_1) {
			path = RF_PATH_B;
			path_max = RF_PATH_NUM_8852C;
		}
	}

	for (i = path; i < path_max; i++) {
		/* CCK DE = efuse CCK value for this channel group + trim */
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
						  _TSSI_DE_MASK));

		/* MCS DE = efuse OFDM value + trim, written to every
		 * bandwidth variant (5M/10M/20M/40M/80M/80+80M)
		 */
		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
						  _TSSI_DE_MASK));
	}
}
3537
/* Enable or disable continuous TSSI operation on a single RF path. On
 * enable, the efuse DE values are reloaded for the PHY owning the path.
 */
static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
				  enum rtw89_rf_path path)
{
	static const u32 tssi_trk[2] = {0x5818, 0x7818};
	static const u32 tssi_en[2] = {0x5820, 0x7820};
	enum rtw89_phy_idx phy;

	if (!en) {
		rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1);
		rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1);
		return;
	}

	rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0);
	rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0);

	/* Under DBCC, path B belongs to PHY1; everything else is PHY0. */
	phy = (rtwdev->dbcc_en && path == RF_PATH_B) ? RTW89_PHY_1 :
						       RTW89_PHY_0;
	_tssi_set_efuse_to_de(rtwdev, phy);
}
3556
/* PHY-indexed wrapper for rtw8852c_tssi_cont_en(): with DBCC each PHY
 * drives its own path; otherwise one call covers both paths.
 */
void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
{
	if (rtwdev->dbcc_en) {
		rtw8852c_tssi_cont_en(rtwdev, en,
				      phy_idx == RTW89_PHY_0 ? RF_PATH_A :
							       RF_PATH_B);
		return;
	}

	rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
	rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
}
3569
/* Program the RF CFGCH bandwidth field plus the matching BB channel
 * filter registers for one path. @is_dav selects which of the two CFGCH
 * register copies (RR_CFGCH vs RR_CFGCH_V1) is written.
 */
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			enum rtw89_bandwidth bw, bool is_dav)
{
	u32 reg18_addr = is_dav ? RR_CFGCH : RR_CFGCH_V1;
	bool set_phy = true;
	u32 rf_reg18;
	u32 cfg_bw;
	u32 bw0, bw1;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	rf_reg18 &= ~RR_CFGCH_BW;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_5:
	case RTW89_CHANNEL_WIDTH_10:
	case RTW89_CHANNEL_WIDTH_20:
		cfg_bw = CFGCH_BW_20M;
		bw0 = 0x3;
		bw1 = 0xf;
		break;
	case RTW89_CHANNEL_WIDTH_40:
		cfg_bw = CFGCH_BW_40M;
		bw0 = 0x3;
		bw1 = 0xf;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		cfg_bw = CFGCH_BW_80M;
		bw0 = 0x2;
		bw1 = 0xd;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		cfg_bw = CFGCH_BW_160M;
		bw0 = 0x1;
		bw1 = 0xb;
		break;
	default:
		/* Unknown width: leave the BW field cleared, skip BB regs. */
		set_phy = false;
		break;
	}

	if (set_phy) {
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, cfg_bw);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8),
				       B_P0_CFCH_BW0, bw0);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8),
				       B_P0_CFCH_BW1, bw1);
	}

	rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);
}
3614
/* Apply bandwidth settings to every path covered by @phy: both CFGCH
 * register copies are programmed per path (is_dav true then false). On
 * CAV silicon without DBCC, path B additionally needs its CFGCH mirrored
 * from path A around an RR_RSV1_RST toggle.
 */
static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		     enum rtw89_bandwidth bw)
{
	bool is_dav;
	u8 kpath, path;
	u32 tmp = 0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
	kpath = _kpath(rtwdev, phy);

	for (path = 0; path < 2; path++) {
		if (!(kpath & BIT(path)))
			continue;

		/* Write the DAV copy first, then the other copy. */
		is_dav = true;
		_bw_setting(rtwdev, path, bw, is_dav);
		is_dav = false;
		_bw_setting(rtwdev, path, bw, is_dav);
		if (rtwdev->dbcc_en)
			continue;

		/* CAV workaround: clear RR_RSV1_RST on path B, copy path A's
		 * CFGCH over with RR_APK_MOD forced to 0x3, wait ~100us for
		 * settling, then set RR_RSV1_RST again.
		 */
		if (path == RF_PATH_B && rtwdev->hal.cv == CHIP_CAV) {
			rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
			tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
			rtw89_write_rf(rtwdev, RF_PATH_B, RR_APK, RR_APK_MOD, 0x3);
			rtw89_write_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK, tmp);
			fsleep(100);
			rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
		}
	}
}
3646
/* Program the center channel and band selection into the RF CFGCH
 * register for one path. @is_dav selects which of the two CFGCH register
 * copies is written, mirroring _bw_setting().
 */
static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			u8 central_ch, enum rtw89_band band, bool is_dav)
{
	u32 rf_reg18;
	u32 reg_reg18_addr;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);

	/* Use the named CFGCH constants (same values as the former raw
	 * 0x18 / 0x10018 addresses) for consistency with _bw_setting().
	 */
	if (is_dav)
		reg_reg18_addr = RR_CFGCH;
	else
		reg_reg18_addr = RR_CFGCH_V1;

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK);
	/* Clear band/channel fields before programming the new selection. */
	rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BAND0 | RR_CFGCH_CH);
	rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);

	switch (band) {
	case RTW89_BAND_2G:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_2G);
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_2G);
		break;
	case RTW89_BAND_5G:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G);
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);
		break;
	case RTW89_BAND_6G:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_6G);
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_6G);
		break;
	default:
		break;
	}
	rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18);
	/* Allow the synthesizer to settle on the new channel. */
	fsleep(100);
}
3682
/* Validate @central_ch for the given band and program it on every path
 * covered by @phy (both CFGCH register copies per path).
 */
static void _ctrl_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		     u8 central_ch, enum rtw89_band band)
{
	u8 kpath, path;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);

	/* Reject center channels outside the supported channel plans. */
	if (band == RTW89_BAND_6G) {
		if (central_ch > 253 || central_ch == 2)
			return;
	} else {
		if ((central_ch > 14 && central_ch < 36) ||
		    (central_ch > 64 && central_ch < 100) ||
		    (central_ch > 144 && central_ch < 149) ||
		    central_ch > 177)
			return;
	}

	kpath = _kpath(rtwdev, phy);

	for (path = 0; path < 2; path++) {
		if (!(kpath & BIT(path)))
			continue;

		_ch_setting(rtwdev, path, central_ch, band, true);
		_ch_setting(rtwdev, path, central_ch, band, false);
	}
}
3708
_rxbb_bw(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_bandwidth bw)3709 static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3710 enum rtw89_bandwidth bw)
3711 {
3712 u8 kpath;
3713 u8 path;
3714 u32 val;
3715
3716 kpath = _kpath(rtwdev, phy);
3717 for (path = 0; path < 2; path++) {
3718 if (!(kpath & BIT(path)))
3719 continue;
3720
3721 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
3722 rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0xa);
3723 switch (bw) {
3724 case RTW89_CHANNEL_WIDTH_20:
3725 val = 0x1b;
3726 break;
3727 case RTW89_CHANNEL_WIDTH_40:
3728 val = 0x13;
3729 break;
3730 case RTW89_CHANNEL_WIDTH_80:
3731 val = 0xb;
3732 break;
3733 case RTW89_CHANNEL_WIDTH_160:
3734 default:
3735 val = 0x3;
3736 break;
3737 }
3738 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, val);
3739 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
3740 }
3741 }
3742
_lck_keep_thermal(struct rtw89_dev * rtwdev)3743 static void _lck_keep_thermal(struct rtw89_dev *rtwdev)
3744 {
3745 struct rtw89_lck_info *lck = &rtwdev->lck;
3746 int path;
3747
3748 for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
3749 lck->thermal[path] =
3750 ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
3751 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
3752 "[LCK] path=%d thermal=0x%x", path, lck->thermal[path]);
3753 }
3754 }
3755
_lck(struct rtw89_dev * rtwdev)3756 static void _lck(struct rtw89_dev *rtwdev)
3757 {
3758 u32 tmp18[2];
3759 int path = rtwdev->dbcc_en ? 2 : 1;
3760 int i;
3761
3762 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "[LCK] DO LCK\n");
3763
3764 tmp18[0] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
3765 tmp18[1] = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
3766
3767 for (i = 0; i < path; i++) {
3768 rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
3769 rtw89_write_rf(rtwdev, i, RR_CFGCH, RFREG_MASK, tmp18[i]);
3770 rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
3771 }
3772
3773 _lck_keep_thermal(rtwdev);
3774 }
3775
3776 #define RTW8852C_LCK_TH 8
3777
rtw8852c_lck_track(struct rtw89_dev * rtwdev)3778 void rtw8852c_lck_track(struct rtw89_dev *rtwdev)
3779 {
3780 struct rtw89_lck_info *lck = &rtwdev->lck;
3781 u8 cur_thermal;
3782 int delta;
3783 int path;
3784
3785 for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
3786 cur_thermal =
3787 ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
3788 delta = abs((int)cur_thermal - lck->thermal[path]);
3789
3790 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
3791 "[LCK] path=%d current thermal=0x%x delta=0x%x\n",
3792 path, cur_thermal, delta);
3793
3794 if (delta >= RTW8852C_LCK_TH) {
3795 _lck(rtwdev);
3796 return;
3797 }
3798 }
3799 }
3800
/* Initialize LCK tracking by recording the current thermal baseline. */
void rtw8852c_lck_init(struct rtw89_dev *rtwdev)
{
	_lck_keep_thermal(rtwdev);
}
3805
3806 static
rtw8852c_ctrl_bw_ch(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,u8 central_ch,enum rtw89_band band,enum rtw89_bandwidth bw)3807 void rtw8852c_ctrl_bw_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3808 u8 central_ch, enum rtw89_band band,
3809 enum rtw89_bandwidth bw)
3810 {
3811 _ctrl_ch(rtwdev, phy, central_ch, band);
3812 _ctrl_bw(rtwdev, phy, bw);
3813 _rxbb_bw(rtwdev, phy, bw);
3814 }
3815
/* Chip op: apply @chan's center channel, band type and bandwidth to the
 * RF front end of @phy_idx.
 */
void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
			     const struct rtw89_chan *chan,
			     enum rtw89_phy_idx phy_idx)
{
	rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, chan->channel,
			    chan->band_type,
			    chan->band_width);
}
3824
rtw8852c_mcc_get_ch_info(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx)3825 void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
3826 {
3827 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3828 struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
3829 u8 idx = mcc_info->table_idx;
3830 int i;
3831
3832 for (i = 0; i < RTW89_IQK_CHS_NR; i++) {
3833 if (mcc_info->ch[idx] == 0)
3834 break;
3835 if (++idx >= RTW89_IQK_CHS_NR)
3836 idx = 0;
3837 }
3838
3839 mcc_info->table_idx = idx;
3840 mcc_info->ch[idx] = chan->channel;
3841 mcc_info->band[idx] = chan->band_type;
3842 }
3843
rtw8852c_rck(struct rtw89_dev * rtwdev)3844 void rtw8852c_rck(struct rtw89_dev *rtwdev)
3845 {
3846 u8 path;
3847
3848 for (path = 0; path < 2; path++)
3849 _rck(rtwdev, path);
3850 }
3851
/* Run DAC calibration on PHY0, bracketed by BTC (BT coexistence) RFK
 * start/stop notifications.
 */
void rtw8852c_dack(struct rtw89_dev *rtwdev)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
	_dac_cal(rtwdev, false);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
3860
/* Run IQ calibration for @phy_idx. Order matters: notify BTC, stop
 * scheduled TX (saving its state in tx_en), wait until the paths are in
 * RX mode, calibrate, then restore TX and notify BTC completion.
 */
void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_iqk_init(rtwdev);
	_iqk(rtwdev, phy_idx, false);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
3876
3877 #define RXDCK_VER_8852C 0xe
3878
/* Run RX DC offset calibration on every path covered by @phy and record
 * the thermal reading at calibration time for later re-trigger decisions.
 */
void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
{
	struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
	u8 path, kpath;
	u32 rf_reg5;

	kpath = _kpath(rtwdev, phy);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n",
		    RXDCK_VER_8852C, rtwdev->hal.cv);

	for (path = 0; path < 2; path++) {
		if (!(kpath & BIT(path)))
			continue;

		/* Read RSV1 only for paths that are actually calibrated;
		 * previously the read happened before the skip check and
		 * wasted an RF bus access on unused paths.
		 */
		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

		/* NOTE(review): TRK_EN=1 appears to pause TSSI tracking
		 * while the DCK disturbs the RX chain - confirm semantics.
		 */
		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x1);
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
		_set_rx_dck(rtwdev, phy, path, is_afe);
		/* Remember the thermal at calibration time. */
		rx_dck->thermal[path] = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x0);
	}
}
3909
3910 #define RTW8852C_RX_DCK_TH 8
3911
rtw8852c_rx_dck_track(struct rtw89_dev * rtwdev)3912 void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
3913 {
3914 struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
3915 u8 cur_thermal;
3916 int delta;
3917 int path;
3918
3919 for (path = 0; path < RF_PATH_NUM_8852C; path++) {
3920 cur_thermal =
3921 ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
3922 delta = abs((int)cur_thermal - rx_dck->thermal[path]);
3923
3924 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
3925 "[RX_DCK] path=%d current thermal=0x%x delta=0x%x\n",
3926 path, cur_thermal, delta);
3927
3928 if (delta >= RTW8852C_RX_DCK_TH) {
3929 rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false);
3930 return;
3931 }
3932 }
3933 }
3934
/* Run digital pre-distortion calibration for @phy_idx. Like IQK, this is
 * bracketed by BTC notifications and a scheduled-TX stop/resume, with a
 * wait for the paths to reach RX mode before calibrating.
 */
void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	/* Enable DPK and force a fresh calibration (no reload of results). */
	rtwdev->dpk.is_dpk_enable = true;
	rtwdev->dpk.is_dpk_reload_en = false;
	_dpk(rtwdev, phy_idx, false);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}
3951
/* Periodic DPK tracking hook; delegates to the internal DPK tracker. */
void rtw8852c_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}
3956
/* Full TSSI initialization for @phy: disable TSSI, run the complete
 * per-path setup sequence (the step order below is the hardware bring-up
 * order), re-enable TSSI, then load DE compensation from efuse.
 */
void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);

	/* Under DBCC each PHY owns a single path. */
	if (rtwdev->dbcc_en) {
		if (phy == RTW89_PHY_0) {
			path = RF_PATH_A;
			path_max = RF_PATH_B;
		} else if (phy == RTW89_PHY_1) {
			path = RF_PATH_B;
			path_max = RF_PATH_NUM_8852C;
		}
	}

	_tssi_disable(rtwdev, phy);

	for (i = path; i < path_max; i++) {
		_tssi_set_sys(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
		_tssi_set_dck(rtwdev, phy, i);
		_tssi_set_bbgain_split(rtwdev, phy, i);
		_tssi_set_tmeter_tbl(rtwdev, phy, i);
		_tssi_slope_cal_org(rtwdev, phy, i);
		_tssi_set_aligk_default(rtwdev, phy, i);
		_tssi_set_slope(rtwdev, phy, i);
		_tssi_run_slope(rtwdev, phy, i);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy);
}
3991
rtw8852c_tssi_scan(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy)3992 void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3993 {
3994 u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
3995
3996 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
3997 __func__, phy);
3998
3999 if (!rtwdev->is_tssi_mode[RF_PATH_A])
4000 return;
4001 if (!rtwdev->is_tssi_mode[RF_PATH_B])
4002 return;
4003
4004 if (rtwdev->dbcc_en) {
4005 if (phy == RTW89_PHY_0) {
4006 path = RF_PATH_A;
4007 path_max = RF_PATH_B;
4008 } else if (phy == RTW89_PHY_1) {
4009 path = RF_PATH_B;
4010 path_max = RF_PATH_NUM_8852C;
4011 }
4012 }
4013
4014 _tssi_disable(rtwdev, phy);
4015
4016 for (i = path; i < path_max; i++) {
4017 _tssi_set_sys(rtwdev, phy, i);
4018 _tssi_set_dck(rtwdev, phy, i);
4019 _tssi_set_tmeter_tbl(rtwdev, phy, i);
4020 _tssi_slope_cal_org(rtwdev, phy, i);
4021 _tssi_set_aligk_default(rtwdev, phy, i);
4022 }
4023
4024 _tssi_enable(rtwdev, phy);
4025 _tssi_set_efuse_to_de(rtwdev, phy);
4026 }
4027
/* Save (enable=true, scan start) or restore (enable=false, scan end) the
 * default TX AGC offsets per path. No-op unless at least one path is in
 * TSSI mode.
 */
static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy, bool enable)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 i;

	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	if (enable) {
		/* SCAN_START: latch the current TXAGC offset for each path.
		 * Skip when the raw offset field reads 0xc000 or 0, and
		 * re-read up to 6 times until a non-zero value is captured.
		 */
		if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 &&
		    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) {
			for (i = 0; i < 6; i++) {
				tssi_info->default_txagc_offset[RF_PATH_A] =
					rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
							      B_TXAGC_BB);
				if (tssi_info->default_txagc_offset[RF_PATH_A])
					break;
			}
		}

		if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 &&
		    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) {
			for (i = 0; i < 6; i++) {
				tssi_info->default_txagc_offset[RF_PATH_B] =
					rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
							      B_TXAGC_BB_S1);
				if (tssi_info->default_txagc_offset[RF_PATH_B])
					break;
			}
		}
	} else {
		/* SCAN_END: write the saved offsets back, then pulse the
		 * offset-enable bit 0->1 so the hardware reloads them.
		 */
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT,
				       tssi_info->default_txagc_offset[RF_PATH_A]);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,
				       tssi_info->default_txagc_offset[RF_PATH_B]);

		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
	}
}
4074
/* Scan notification hook: save the default TX AGC offsets on scan start
 * and restore them on scan end.
 */
void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev,
			       bool scan_start, enum rtw89_phy_idx phy_idx)
{
	/* Both branches of the former if/else called the same function
	 * with the branch condition's value; pass it through directly.
	 */
	rtw8852c_tssi_default_txagc(rtwdev, phy_idx, scan_start);
}
4083