/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17 
18 #include "mt76x2.h"
19 #include "mt76x2_eeprom.h"
20 
21 static void
mt76x2_adjust_high_lna_gain(struct mt76x2_dev * dev,int reg,s8 offset)22 mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
23 {
24 	s8 gain;
25 
26 	gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
27 	gain -= offset / 2;
28 	mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
29 }
30 
31 static void
mt76x2_adjust_agc_gain(struct mt76x2_dev * dev,int reg,s8 offset)32 mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
33 {
34 	s8 gain;
35 
36 	gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
37 	gain += offset;
38 	mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
39 }
40 
mt76x2_apply_gain_adj(struct mt76x2_dev * dev)41 void mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
42 {
43 	s8 *gain_adj = dev->cal.rx.high_gain;
44 
45 	mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
46 	mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
47 
48 	mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
49 	mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
50 }
51 EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj);
52 
mt76x2_phy_set_txpower_regs(struct mt76x2_dev * dev,enum nl80211_band band)53 void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
54 				 enum nl80211_band band)
55 {
56 	u32 pa_mode[2];
57 	u32 pa_mode_adj;
58 
59 	if (band == NL80211_BAND_2GHZ) {
60 		pa_mode[0] = 0x010055ff;
61 		pa_mode[1] = 0x00550055;
62 
63 		mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
64 		mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
65 
66 		if (mt76x2_ext_pa_enabled(dev, band)) {
67 			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
68 			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
69 		} else {
70 			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
71 			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
72 		}
73 	} else {
74 		pa_mode[0] = 0x0000ffff;
75 		pa_mode[1] = 0x00ff00ff;
76 
77 		if (mt76x2_ext_pa_enabled(dev, band)) {
78 			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
79 			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
80 		} else {
81 			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
82 			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
83 		}
84 
85 		if (mt76x2_ext_pa_enabled(dev, band))
86 			pa_mode_adj = 0x04000000;
87 		else
88 			pa_mode_adj = 0;
89 
90 		mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
91 		mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
92 	}
93 
94 	mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
95 	mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
96 	mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
97 	mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
98 
99 	if (mt76x2_ext_pa_enabled(dev, band)) {
100 		u32 val;
101 
102 		if (band == NL80211_BAND_2GHZ)
103 			val = 0x3c3c023c;
104 		else
105 			val = 0x363c023c;
106 
107 		mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
108 		mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
109 		mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
110 	} else {
111 		if (band == NL80211_BAND_2GHZ) {
112 			u32 val = 0x0f3c3c3c;
113 
114 			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
115 			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
116 			mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
117 		} else {
118 			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
119 			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
120 			mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
121 		}
122 	}
123 }
124 EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
125 
126 static void
mt76x2_limit_rate_power(struct mt76_rate_power * r,int limit)127 mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
128 {
129 	int i;
130 
131 	for (i = 0; i < sizeof(r->all); i++)
132 		if (r->all[i] > limit)
133 			r->all[i] = limit;
134 }
135 
136 static u32
mt76x2_tx_power_mask(u8 v1,u8 v2,u8 v3,u8 v4)137 mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
138 {
139 	u32 val = 0;
140 
141 	val |= (v1 & (BIT(6) - 1)) << 0;
142 	val |= (v2 & (BIT(6) - 1)) << 8;
143 	val |= (v3 & (BIT(6) - 1)) << 16;
144 	val |= (v4 & (BIT(6) - 1)) << 24;
145 	return val;
146 }
147 
148 static void
mt76x2_add_rate_power_offset(struct mt76_rate_power * r,int offset)149 mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
150 {
151 	int i;
152 
153 	for (i = 0; i < sizeof(r->all); i++)
154 		r->all[i] += offset;
155 }
156 
157 static int
mt76x2_get_min_rate_power(struct mt76_rate_power * r)158 mt76x2_get_min_rate_power(struct mt76_rate_power *r)
159 {
160 	int i;
161 	s8 ret = 0;
162 
163 	for (i = 0; i < sizeof(r->all); i++) {
164 		if (!r->all[i])
165 			continue;
166 
167 		if (ret)
168 			ret = min(ret, r->all[i]);
169 		else
170 			ret = r->all[i];
171 	}
172 
173 	return ret;
174 }
175 
/* Recompute and program TX power for the current channel: combine
 * EEPROM target power, per-rate power table, bandwidth delta and the
 * user-configured limit, then write the per-rate power registers.
 * Clamping and register layout are order-sensitive; do not reorder.
 */
void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
{
	enum nl80211_chan_width width = dev->mt76.chandef.width;
	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
	struct mt76x2_tx_power_info txp;
	int txp_0, txp_1, delta = 0;
	struct mt76_rate_power t = {};
	int base_power, gain;

	mt76x2_get_power_info(dev, &txp, chan);

	/* Extra EEPROM power delta for wide channels. */
	if (width == NL80211_CHAN_WIDTH_40)
		delta = txp.delta_bw40;
	else if (width == NL80211_CHAN_WIDTH_80)
		delta = txp.delta_bw80;

	/* Per-rate table, offset by the chain-0 target power and clamped
	 * to the user-configured limit.
	 */
	mt76x2_get_rate_power(dev, &t, chan);
	mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
	mt76x2_limit_rate_power(&t, dev->txpower_conf);
	dev->txpower_cur = mt76x2_get_max_rate_power(&t);

	/* Rebase so the per-rate entries become offsets from the lowest
	 * used rate power; fold the difference into the per-chain delta.
	 */
	base_power = mt76x2_get_min_rate_power(&t);
	delta += base_power - txp.chain[0].target_power;
	txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
	txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;

	/* Keep both chain init values inside the hardware range [0, 0x2f],
	 * compensating through base_power so rate offsets stay correct.
	 */
	gain = min(txp_0, txp_1);
	if (gain < 0) {
		base_power -= gain;
		txp_0 -= gain;
		txp_1 -= gain;
	} else if (gain > 0x2f) {
		base_power -= gain - 0x2f;
		txp_0 = 0x2f;
		txp_1 = 0x2f;
	}

	mt76x2_add_rate_power_offset(&t, -base_power);
	dev->target_power = txp.chain[0].target_power;
	dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
	dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
	dev->rate_power = t;

	mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
	mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);

	/* Four 6-bit rate power offsets per 32-bit register; the rate
	 * layout follows the MT76x2 TX_PWR_CFG register map.
	 */
	mt76_wr(dev, MT_TX_PWR_CFG_0,
		mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
	mt76_wr(dev, MT_TX_PWR_CFG_1,
		mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
	mt76_wr(dev, MT_TX_PWR_CFG_2,
		mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
	mt76_wr(dev, MT_TX_PWR_CFG_3,
		mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
	mt76_wr(dev, MT_TX_PWR_CFG_4,
		mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
	mt76_wr(dev, MT_TX_PWR_CFG_7,
		mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
	mt76_wr(dev, MT_TX_PWR_CFG_8,
		mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
	mt76_wr(dev, MT_TX_PWR_CFG_9,
		mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
}
EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
240 
/* Program the TX software delay registers; the values differ between
 * external-PA and internal-PA boards, and for cfg0 also by bandwidth
 * (bw != 0 means a wide channel).
 */
void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
			       enum nl80211_band band, u8 bw)
{
	bool ext_pa = mt76x2_ext_pa_enabled(dev, band);
	u32 cfg0, cfg1;

	if (ext_pa)
		cfg0 = bw ? 0x000b0c01 : 0x00101101;
	else
		cfg0 = bw ? 0x000b0b01 : 0x00101001;
	cfg1 = ext_pa ? 0x00011414 : 0x00021414;

	mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
	mt76_wr(dev, MT_TX_SW_CFG1, cfg1);

	mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
}
EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
259 
/* Set the baseband core/AGC bandwidth fields and the control-channel
 * index for the requested channel width.
 */
void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
{
	int core_val, agc_val;

	if (width == NL80211_CHAN_WIDTH_80) {
		core_val = 3;
		agc_val = 7;
	} else if (width == NL80211_CHAN_WIDTH_40) {
		core_val = 2;
		agc_val = 3;
	} else {
		/* 20 MHz and any other width fall back to the narrow setting */
		core_val = 0;
		agc_val = 1;
	}

	mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
	mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
}
EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
285 
/* Select the active TX band (2G/5G are mutually exclusive bits) and
 * flag whether the primary channel sits in the upper 40 MHz half.
 */
void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
{
	if (band == NL80211_BAND_2GHZ) {
		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
	} else if (band == NL80211_BAND_5GHZ) {
		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
	}

	mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
		       primary_upper);
}
EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
303 
/* Walk all active WCID entries and return the lowest averaged RSSI
 * among recently-active stations, or -75 when no station qualifies.
 * Station lookup is RCU-protected; rx_lock guards the per-station
 * RSSI average and inactivity counter.
 */
int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev)
{
	struct mt76x2_sta *sta;
	struct mt76_wcid *wcid;
	int i, j, min_rssi = 0;
	s8 cur_rssi;

	/* Disable BH so rx_lock below cannot deadlock against the
	 * RX path; RCU protects the wcid pointer dereference.
	 */
	local_bh_disable();
	rcu_read_lock();

	for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
		unsigned long mask = dev->wcid_mask[i];

		if (!mask)
			continue;

		/* j tracks the absolute WCID index while mask is
		 * shifted right until no set bits remain.
		 */
		for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
			if (!(mask & 1))
				continue;

			wcid = rcu_dereference(dev->wcid[j]);
			if (!wcid)
				continue;

			sta = container_of(wcid, struct mt76x2_sta, wcid);
			spin_lock(&dev->mt76.rx_lock);
			/* Only trust the average for stations seen within
			 * the last few polls; stale entries count as 0
			 * (i.e. ignored by the minimum below).
			 */
			if (sta->inactive_count++ < 5)
				cur_rssi = ewma_signal_read(&sta->rssi);
			else
				cur_rssi = 0;
			spin_unlock(&dev->mt76.rx_lock);

			if (cur_rssi < min_rssi)
				min_rssi = cur_rssi;
		}
	}

	rcu_read_unlock();
	local_bh_enable();

	/* No usable measurement: fall back to a conservative default. */
	if (!min_rssi)
		return -75;

	return min_rssi;
}
EXPORT_SYMBOL_GPL(mt76x2_phy_get_min_avg_rssi);
350