/*
 * Copyright (c) 2014, Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "hw.h"
#include "dynack.h"

#define COMPUTE_TO		(5 * HZ)
#define LATEACK_DELAY		(10 * HZ)
#define EWMA_LEVEL		96
#define EWMA_DIV		128
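
/*
 * Reading of the tunables above (illustrative, derived from how they are
 * used below): the per-station timeouts are folded into the hardware
 * registers at most once every COMPUTE_TO jiffies (5 s), a "late ACK"
 * event blocks recomputation for LATEACK_DELAY (10 s), and the EWMA
 * weights give the previous estimate EWMA_LEVEL/EWMA_DIV = 75% of the
 * total, i.e. each fresh sample contributes 25%.
 */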

/**
 * ath_dynack_get_max_to - get max timeout according to channel width
 * @ah: ath hw
 *
 * Return: maximum ACK timeout (us) for the current channel
 */
static u32 ath_dynack_get_max_to(struct ath_hw *ah)
{
	const struct ath9k_channel *chan = ah->curchan;

	if (!chan)
		return 300;

	if (IS_CHAN_HT40(chan))
		return 300;
	if (IS_CHAN_HALF_RATE(chan))
		return 750;
	if (IS_CHAN_QUARTER_RATE(chan))
		return 1500;
	return 600;
}
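
/*
 * Rough rule of thumb (not from the original source): radio waves travel
 * about 300 m per microsecond, so a 600 us ACK timeout leaves room for
 * roughly a 180 km round trip, i.e. links up to ~90 km, while the 1500 us
 * quarter-rate limit stretches that to ~225 km before protocol overhead
 * is accounted for.
 */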

/**
 * ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation
 * @old: previous average value
 * @new: latest sample
 *
 * Return: the updated moving average
 */
static inline int ath_dynack_ewma(int old, int new)
{
	if (old > 0)
		return (new * (EWMA_DIV - EWMA_LEVEL) +
			old * EWMA_LEVEL) / EWMA_DIV;
	else
		return new;
}
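
/*
 * Worked example with made-up numbers: ath_dynack_ewma(300, 400)
 * = (400 * (128 - 96) + 300 * 96) / 128 = (12800 + 28800) / 128 = 325,
 * so a single outlier sample moves the estimate by only a quarter of
 * the difference.
 */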

/**
 * ath_dynack_get_sifs - get sifs time based on phy used
 * @ah: ath hw
 * @phy: phy used
 *
 */
static inline u32 ath_dynack_get_sifs(struct ath_hw *ah, int phy)
{
	u32 sifs = CCK_SIFS_TIME;

	if (phy == WLAN_RC_PHY_OFDM) {
		if (IS_CHAN_QUARTER_RATE(ah->curchan))
			sifs = OFDM_SIFS_TIME_QUARTER;
		else if (IS_CHAN_HALF_RATE(ah->curchan))
			sifs = OFDM_SIFS_TIME_HALF;
		else
			sifs = OFDM_SIFS_TIME;
	}
	return sifs;
}
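
/*
 * For reference (802.11 standard values, not taken from this file): SIFS
 * is 10 us for CCK/DSSS in 2.4 GHz and 16 us for OFDM; the half- and
 * quarter-rate variants scale the OFDM value up for 10/5 MHz channels.
 */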

/**
 * ath_dynack_bssidmask - filter out ACK frames based on BSSID mask
 * @ah: ath hw
 * @mac: receiver address
 */
static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac)
{
	int i;
	struct ath_common *common = ath9k_hw_common(ah);

	for (i = 0; i < ETH_ALEN; i++) {
		if ((common->macaddr[i] & common->bssidmask[i]) !=
		    (mac[i] & common->bssidmask[i]))
			return false;
	}

	return true;
}
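
/*
 * Hypothetical example: with macaddr 00:11:22:33:44:55 and a bssidmask of
 * ff:ff:ff:ff:ff:00, any receiver address whose first five octets match
 * (00:11:22:33:44:xx) passes the filter, so ACKs addressed to any of the
 * local interface addresses covered by the mask are accepted.
 */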

/**
 * ath_dynack_set_timeout - configure timeouts/slottime registers
 * @ah: ath hw
 * @to: timeout value
 *
 */
static void ath_dynack_set_timeout(struct ath_hw *ah, int to)
{
	struct ath_common *common = ath9k_hw_common(ah);
	int slottime = (to - 3) / 2;

	ath_dbg(common, DYNACK, "ACK timeout %u slottime %u\n",
		to, slottime);
	ath9k_hw_setslottime(ah, slottime);
	ath9k_hw_set_ack_timeout(ah, to);
	ath9k_hw_set_cts_timeout(ah, to);
}
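
/*
 * Illustrative arithmetic (not from the original source): with the 600 us
 * full-rate maximum, slottime = (600 - 3) / 2 = 298 us, i.e. roughly half
 * of the ACK/CTS timeout budget is handed to the slot time.
 */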

/**
 * ath_dynack_compute_ackto - compute ACK timeout as the maximum STA timeout
 * @ah: ath hw
 *
 * should be called while holding qlock
 */
static void ath_dynack_compute_ackto(struct ath_hw *ah)
{
	struct ath_dynack *da = &ah->dynack;
	struct ath_node *an;
	int to = 0;

	list_for_each_entry(an, &da->nodes, list)
		if (an->ackto > to)
			to = an->ackto;

	if (to && da->ackto != to) {
		ath_dynack_set_timeout(ah, to);
		da->ackto = to;
	}
}

/**
 * ath_dynack_compute_to - compute STA ACK timeout
 * @ah: ath hw
 *
 * should be called while holding qlock
 */
static void ath_dynack_compute_to(struct ath_hw *ah)
{
	struct ath_dynack *da = &ah->dynack;
	u32 ackto, ack_ts, max_to;
	struct ieee80211_sta *sta;
	struct ts_info *st_ts;
	struct ath_node *an;
	u8 *dst, *src;

	rcu_read_lock();

	max_to = ath_dynack_get_max_to(ah);
	while (da->st_rbf.h_rb != da->st_rbf.t_rb &&
	       da->ack_rbf.h_rb != da->ack_rbf.t_rb) {
		ack_ts = da->ack_rbf.tstamp[da->ack_rbf.h_rb];
		st_ts = &da->st_rbf.ts[da->st_rbf.h_rb];
		dst = da->st_rbf.addr[da->st_rbf.h_rb].h_dest;
		src = da->st_rbf.addr[da->st_rbf.h_rb].h_src;

		ath_dbg(ath9k_hw_common(ah), DYNACK,
			"ack_ts %u st_ts %u st_dur %u [%u-%u]\n",
			ack_ts, st_ts->tstamp, st_ts->dur,
			da->ack_rbf.h_rb, da->st_rbf.h_rb);

		if (ack_ts > st_ts->tstamp + st_ts->dur) {
			ackto = ack_ts - st_ts->tstamp - st_ts->dur;

			if (ackto < max_to) {
				sta = ieee80211_find_sta_by_ifaddr(ah->hw, dst,
								   src);
				if (sta) {
					an = (struct ath_node *)sta->drv_priv;
					an->ackto = ath_dynack_ewma(an->ackto,
								    ackto);
					ath_dbg(ath9k_hw_common(ah), DYNACK,
						"%pM to %d [%u]\n", dst,
						an->ackto, ackto);
					if (time_is_before_jiffies(da->lto)) {
						ath_dynack_compute_ackto(ah);
						da->lto = jiffies + COMPUTE_TO;
					}
				}
				INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);
			}
			INCR(da->st_rbf.h_rb, ATH_DYN_BUF);
		} else {
			INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);
		}
	}

	rcu_read_unlock();
}
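
/*
 * Illustrative pairing (made-up timestamps): if the oldest tx status
 * sample has tstamp = 1000 us and dur = 200 us and the oldest ACK was
 * received at 1450 us, the measured gap is 1450 - 1000 - 200 = 250 us,
 * which is fed into that station's EWMA. An ACK timestamp that does not
 * lie after the end of the transmission cannot belong to it, so that ACK
 * sample is dropped and the next one is tried against the same tx sample.
 */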

/**
 * ath_dynack_sample_tx_ts - status timestamp sampling method
 * @ah: ath hw
 * @skb: socket buffer
 * @ts: tx status info
 * @sta: station pointer
 *
 */
void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
			     struct ath_tx_status *ts,
			     struct ieee80211_sta *sta)
{
	struct ieee80211_hdr *hdr;
	struct ath_dynack *da = &ah->dynack;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u32 dur = ts->duration;
	u8 ridx;

	if (!da->enabled || (info->flags & IEEE80211_TX_CTL_NO_ACK))
		return;

	spin_lock_bh(&da->qlock);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* late ACK */
	if (ts->ts_status & ATH9K_TXERR_XRETRY) {
		if (ieee80211_is_assoc_req(hdr->frame_control) ||
		    ieee80211_is_assoc_resp(hdr->frame_control) ||
		    ieee80211_is_auth(hdr->frame_control)) {
			u32 max_to = ath_dynack_get_max_to(ah);

			ath_dbg(common, DYNACK, "late ack\n");
			ath_dynack_set_timeout(ah, max_to);
			if (sta) {
				struct ath_node *an;

				an = (struct ath_node *)sta->drv_priv;
				an->ackto = -1;
			}
			da->lto = jiffies + LATEACK_DELAY;
		}

		spin_unlock_bh(&da->qlock);
		return;
	}

	ridx = ts->ts_rateindex;

	da->st_rbf.ts[da->st_rbf.t_rb].tstamp = ts->ts_tstamp;
	ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_dest, hdr->addr1);
	ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_src, hdr->addr2);

	if (!(info->status.rates[ridx].flags & IEEE80211_TX_RC_MCS)) {
		const struct ieee80211_rate *rate;
		struct ieee80211_tx_rate *rates = info->status.rates;
		u32 phy;

		rate = &common->sbands[info->band].bitrates[rates[ridx].idx];
		if (info->band == NL80211_BAND_2GHZ &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		dur -= ath_dynack_get_sifs(ah, phy);
	}
	da->st_rbf.ts[da->st_rbf.t_rb].dur = dur;

	INCR(da->st_rbf.t_rb, ATH_DYN_BUF);
	if (da->st_rbf.t_rb == da->st_rbf.h_rb)
		INCR(da->st_rbf.h_rb, ATH_DYN_BUF);

	ath_dbg(common, DYNACK, "{%pM} tx sample %u [dur %u][h %u-t %u]\n",
		hdr->addr1, ts->ts_tstamp, dur, da->st_rbf.h_rb,
		da->st_rbf.t_rb);

	ath_dynack_compute_to(ah);

	spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_sample_tx_ts);

/**
 * ath_dynack_sample_ack_ts - ACK timestamp sampling method
 * @ah: ath hw
 * @skb: socket buffer
 * @ts: rx timestamp
 *
 */
void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb,
			      u32 ts)
{
	struct ath_dynack *da = &ah->dynack;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!da->enabled || !ath_dynack_bssidmask(ah, hdr->addr1))
		return;

	spin_lock_bh(&da->qlock);
	da->ack_rbf.tstamp[da->ack_rbf.t_rb] = ts;

	INCR(da->ack_rbf.t_rb, ATH_DYN_BUF);
	if (da->ack_rbf.t_rb == da->ack_rbf.h_rb)
		INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);

	ath_dbg(common, DYNACK, "rx sample %u [h %u-t %u]\n",
		ts, da->ack_rbf.h_rb, da->ack_rbf.t_rb);

	ath_dynack_compute_to(ah);

	spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_sample_ack_ts);

/**
 * ath_dynack_node_init - init ath_node related info
 * @ah: ath hw
 * @an: ath node
 *
 */
void ath_dynack_node_init(struct ath_hw *ah, struct ath_node *an)
{
	struct ath_dynack *da = &ah->dynack;

	an->ackto = da->ackto;

	spin_lock_bh(&da->qlock);
	list_add_tail(&an->list, &da->nodes);
	spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_node_init);

/**
 * ath_dynack_node_deinit - deinit ath_node related info
 * @ah: ath hw
 * @an: ath node
 *
 */
void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an)
{
	struct ath_dynack *da = &ah->dynack;

	spin_lock_bh(&da->qlock);
	list_del(&an->list);
	spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_node_deinit);

/**
 * ath_dynack_reset - reset dynack processing
 * @ah: ath hw
 *
 */
void ath_dynack_reset(struct ath_hw *ah)
{
	struct ath_dynack *da = &ah->dynack;
	struct ath_node *an;

	spin_lock_bh(&da->qlock);

	da->lto = jiffies + COMPUTE_TO;

	da->st_rbf.t_rb = 0;
	da->st_rbf.h_rb = 0;
	da->ack_rbf.t_rb = 0;
	da->ack_rbf.h_rb = 0;

	da->ackto = ath_dynack_get_max_to(ah);
	list_for_each_entry(an, &da->nodes, list)
		an->ackto = da->ackto;

	/* init acktimeout */
	ath_dynack_set_timeout(ah, da->ackto);

	spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_reset);

/**
 * ath_dynack_init - init dynack data structure
 * @ah: ath hw
 *
 */
void ath_dynack_init(struct ath_hw *ah)
{
	struct ath_dynack *da = &ah->dynack;

	memset(da, 0, sizeof(struct ath_dynack));

	spin_lock_init(&da->qlock);
	INIT_LIST_HEAD(&da->nodes);
	/* ackto = slottime + sifs + air delay */
	da->ackto = 9 + 16 + 64;
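	/*
	 * Assumed breakdown of the constants above: 9 us is the 802.11 OFDM
	 * short slot time and 16 us the OFDM SIFS, leaving 64 us as an air
	 * propagation allowance, for an initial ackto of 89 us.
	 */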

	ah->hw->wiphy->features |= NL80211_FEATURE_ACKTO_ESTIMATION;
}