// SPDX-License-Identifier: GPL-2.0-only
/*
 * Data transmitting implementation.
 *
 * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
 * Copyright (c) 2010, ST-Ericsson
 */
#include <net/mac80211.h>
#include <linux/etherdevice.h>

#include "data_tx.h"
#include "wfx.h"
#include "bh.h"
#include "sta.h"
#include "queue.h"
#include "debug.h"
#include "traces.h"
#include "hif_tx_mib.h"

static int wfx_get_hw_rate(struct wfx_dev *wdev, const struct ieee80211_tx_rate *rate)
{
	struct ieee80211_supported_band *band;

	if (rate->idx < 0)
		return -1;
	if (rate->flags & IEEE80211_TX_RC_MCS) {
		if (rate->idx > 7) {
			WARN(1, "wrong rate->idx value: %d", rate->idx);
			return -1;
		}
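		/* MCS rates are numbered from 14 in the hardware rate table, hence the offset
		 * below (the legacy 11b/11g rates occupy hardware indices 0 to 13).
		 */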
		return rate->idx + 14;
	}
	/* The device only supports the 2.4GHz band. Otherwise, the band information should be
	 * retrieved from ieee80211_tx_info.
	 */
	band = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
	if (rate->idx >= band->n_bitrates) {
		WARN(1, "wrong rate->idx value: %d", rate->idx);
		return -1;
	}
	return band->bitrates[rate->idx].hw_value;
}

/* TX policy cache implementation */

static void wfx_tx_policy_build(struct wfx_vif *wvif, struct wfx_tx_policy *policy,
				struct ieee80211_tx_rate *rates)
{
	struct wfx_dev *wdev = wvif->wdev;
	int i, rateid;
	u8 count;

	WARN(rates[0].idx < 0, "invalid rate policy");
	memset(policy, 0, sizeof(*policy));
	for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
		if (rates[i].idx < 0)
			break;
		WARN_ON(rates[i].count > 15);
		rateid = wfx_get_hw_rate(wdev, &rates[i]);
		/* Pack two values in each byte of policy->rates */
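		/* Each 4-bit field holds the retry count for one hardware rate: even hw rate
		 * indices land in the low nibble, odd ones in the high nibble. For example, a
		 * count of 3 for hw rate 5 ends up as 0x30 in policy->rates[2].
		 */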
		count = rates[i].count;
		if (rateid % 2)
			count <<= 4;
		policy->rates[rateid / 2] |= count;
	}
}

static bool wfx_tx_policy_is_equal(const struct wfx_tx_policy *a, const struct wfx_tx_policy *b)
{
	return !memcmp(a->rates, b->rates, sizeof(a->rates));
}

static int wfx_tx_policy_find(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *wanted)
{
	struct wfx_tx_policy *it;

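	/* The wanted policy may match an entry that is currently in use or one that is cached
	 * but unused; either way, return its index within the cache array.
	 */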
	list_for_each_entry(it, &cache->used, link)
		if (wfx_tx_policy_is_equal(wanted, it))
			return it - cache->cache;
	list_for_each_entry(it, &cache->free, link)
		if (wfx_tx_policy_is_equal(wanted, it))
			return it - cache->cache;
	return -1;
}

static void wfx_tx_policy_use(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry)
{
	++entry->usage_count;
	list_move(&entry->link, &cache->used);
}

static int wfx_tx_policy_release(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry)
{
	int ret = --entry->usage_count;

	if (!ret)
		list_move(&entry->link, &cache->free);
	return ret;
}

static int wfx_tx_policy_get(struct wfx_vif *wvif, struct ieee80211_tx_rate *rates, bool *renew)
{
	int idx;
	struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;
	struct wfx_tx_policy wanted;
	struct wfx_tx_policy *entry;

	wfx_tx_policy_build(wvif, &wanted, rates);

	spin_lock_bh(&cache->lock);
	if (list_empty(&cache->free)) {
		WARN(1, "unable to get a valid Tx policy");
		spin_unlock_bh(&cache->lock);
		return HIF_TX_RETRY_POLICY_INVALID;
	}
	idx = wfx_tx_policy_find(cache, &wanted);
	if (idx >= 0) {
		*renew = false;
	} else {
		/* If the policy is not found, create a new one using the oldest entry in the
		 * "free" list
		 */
		*renew = true;
		entry = list_entry(cache->free.prev, struct wfx_tx_policy, link);
		memcpy(entry->rates, wanted.rates, sizeof(entry->rates));
		entry->uploaded = false;
		entry->usage_count = 0;
		idx = entry - cache->cache;
	}
	wfx_tx_policy_use(cache, &cache->cache[idx]);
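	/* If the last free entry was just consumed, stop the mac80211 queues until
	 * wfx_tx_policy_put() releases an entry.
	 */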
	if (list_empty(&cache->free))
		ieee80211_stop_queues(wvif->wdev->hw);
	spin_unlock_bh(&cache->lock);
	return idx;
}

static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
{
	int usage, locked;
	struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;

	if (idx == HIF_TX_RETRY_POLICY_INVALID)
		return;
	spin_lock_bh(&cache->lock);
	locked = list_empty(&cache->free);
	usage = wfx_tx_policy_release(cache, &cache->cache[idx]);
	if (locked && !usage)
		ieee80211_wake_queues(wvif->wdev->hw);
	spin_unlock_bh(&cache->lock);
}

static int wfx_tx_policy_upload(struct wfx_vif *wvif)
{
	struct wfx_tx_policy *policies = wvif->tx_policy_cache.cache;
	u8 tmp_rates[12];
	int i, is_used;

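	/* Upload, one at a time, every policy that is in use but not yet known to the firmware.
	 * The spinlock cannot be held across the HIF request, so the rates are copied to a
	 * temporary buffer first.
	 */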
	do {
		spin_lock_bh(&wvif->tx_policy_cache.lock);
		for (i = 0; i < ARRAY_SIZE(wvif->tx_policy_cache.cache); ++i) {
			is_used = memzcmp(policies[i].rates, sizeof(policies[i].rates));
			if (!policies[i].uploaded && is_used)
				break;
		}
		if (i < ARRAY_SIZE(wvif->tx_policy_cache.cache)) {
			policies[i].uploaded = true;
			memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates));
			spin_unlock_bh(&wvif->tx_policy_cache.lock);
			wfx_hif_set_tx_rate_retry_policy(wvif, i, tmp_rates);
		} else {
			spin_unlock_bh(&wvif->tx_policy_cache.lock);
		}
	} while (i < ARRAY_SIZE(wvif->tx_policy_cache.cache));
	return 0;
}

void wfx_tx_policy_upload_work(struct work_struct *work)
{
	struct wfx_vif *wvif = container_of(work, struct wfx_vif, tx_policy_upload_work);

	wfx_tx_policy_upload(wvif);
	wfx_tx_unlock(wvif->wdev);
}

void wfx_tx_policy_init(struct wfx_vif *wvif)
{
	struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;
	int i;

	memset(cache, 0, sizeof(*cache));

	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->used);
	INIT_LIST_HEAD(&cache->free);

	for (i = 0; i < ARRAY_SIZE(cache->cache); ++i)
		list_add(&cache->cache[i].link, &cache->free);
}

/* Tx implementation */

static bool wfx_is_action_back(struct ieee80211_hdr *hdr)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;

	if (!ieee80211_is_action(mgmt->frame_control))
		return false;
	if (mgmt->u.action.category != WLAN_CATEGORY_BACK)
		return false;
	return true;
}

static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta,
			     struct ieee80211_hdr *hdr)
{
	struct wfx_sta_priv *sta_priv = sta ? (struct wfx_sta_priv *)&sta->drv_priv : NULL;
	struct ieee80211_vif *vif = wvif_to_vif(wvif);
	const u8 *da = ieee80211_get_DA(hdr);

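	/* In AP mode, frames destined to a known station carry the link_id allocated when the
	 * station was added; multicast frames (and anything sent from a non-AP interface) use
	 * link_id 0, and frames for not-yet-associated stations use HIF_LINK_ID_NOT_ASSOCIATED.
	 */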
	if (sta_priv && sta_priv->link_id)
		return sta_priv->link_id;
	if (vif->type != NL80211_IFTYPE_AP)
		return 0;
	if (is_multicast_ether_addr(da))
		return 0;
	return HIF_LINK_ID_NOT_ASSOCIATED;
}

static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
{
	int i;
	bool finished;

	/* Firmware is not able to mix rates with different flags */
	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
			rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
		if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI))
			rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
		if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS))
			rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
	}

	/* Sort rates and remove duplicates */
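	/* Simple bubble sort: entries using the same rate index are merged (their retry counts
	 * are added, capped at 15, the largest value that fits in the 4-bit counters of the
	 * retry policy), and the remaining entries are ordered by decreasing rate index.
	 */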
	do {
		finished = true;
		for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
			if (rates[i + 1].idx == rates[i].idx &&
			    rates[i].idx != -1) {
				rates[i].count += rates[i + 1].count;
				if (rates[i].count > 15)
					rates[i].count = 15;
				rates[i + 1].idx = -1;
				rates[i + 1].count = 0;

				finished = false;
			}
			if (rates[i + 1].idx > rates[i].idx) {
				swap(rates[i + 1], rates[i]);
				finished = false;
			}
		}
	} while (!finished);
	/* Ensure that MCS0 or 1Mbps is present at the end of the retry list */
	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		if (rates[i].idx == 0)
			break;
		if (rates[i].idx == -1) {
			rates[i].idx = 0;
			rates[i].count = 8; /* == hw->max_rate_tries */
			rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS;
			break;
		}
	}
	/* All retries use long GI */
	for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
		rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
}

static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info)
{
	bool tx_policy_renew = false;
	u8 ret;

	ret = wfx_tx_policy_get(wvif, tx_info->driver_rates, &tx_policy_renew);
	if (ret == HIF_TX_RETRY_POLICY_INVALID)
		dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy");

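	/* A new policy was written in the cache: it must be uploaded to the firmware before the
	 * frame is sent. Keep the Tx traffic locked until the upload work has run; if the work
	 * was already pending, it already holds a lock, so release the one just taken.
	 */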
	if (tx_policy_renew) {
		wfx_tx_lock(wvif->wdev);
		if (!schedule_work(&wvif->tx_policy_upload_work))
			wfx_tx_unlock(wvif->wdev);
	}
	return ret;
}

static int wfx_tx_get_frame_format(struct ieee80211_tx_info *tx_info)
{
	if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_MCS))
		return HIF_FRAME_FORMAT_NON_HT;
	else if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD))
		return HIF_FRAME_FORMAT_MIXED_FORMAT_HT;
	else
		return HIF_FRAME_FORMAT_GF_HT_11N;
}

static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key)
{
	int mic_space;

	if (!hw_key)
		return 0;
	if (hw_key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
		return 0;
	mic_space = (hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) ? 8 : 0;
	return hw_key->icv_len + mic_space;
}

static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct wfx_hif_msg *hif_msg;
	struct wfx_hif_req_tx *req;
	struct wfx_tx_priv *tx_priv;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int queue_id = skb_get_queue_mapping(skb);
	size_t offset = (size_t)skb->data & 3;
	int wmsg_len = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) + offset;
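	/* 'offset' is the misalignment of the 802.11 header within the skb. It is added to the
	 * length of the pushed HIF header so the frame can stay in place (likely so that the HIF
	 * message itself ends up word aligned), and it is reported to the firmware through
	 * fc_offset below.
	 */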

	WARN(queue_id >= IEEE80211_NUM_ACS, "unsupported queue_id");
	wfx_tx_fixup_rates(tx_info->driver_rates);

	/* From now on, tx_info->control is unusable */
	memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
	/* Fill tx_priv */
	tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
	tx_priv->icv_size = wfx_tx_get_icv_len(hw_key);

	/* Fill hif_msg */
	WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
	WARN(offset & 1, "attempt to transmit an unaligned frame");
	skb_put(skb, tx_priv->icv_size);
	skb_push(skb, wmsg_len);
	memset(skb->data, 0, wmsg_len);
	hif_msg = (struct wfx_hif_msg *)skb->data;
	hif_msg->len = cpu_to_le16(skb->len);
	hif_msg->id = HIF_REQ_ID_TX;
	hif_msg->interface = wvif->id;
	if (skb->len > le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf)) {
		dev_warn(wvif->wdev->dev,
			 "requested frame size (%d) is larger than maximum supported (%d)\n",
			 skb->len, le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf));
		skb_pull(skb, wmsg_len);
		return -EIO;
	}

	/* Fill tx request */
	req = (struct wfx_hif_req_tx *)hif_msg->body;
	/* packet_id just needs to be unique on the device. 32 bits are more than necessary for
	 * that task, so we take advantage of them to add some extra data for debugging.
	 */
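	/* Layout of packet_id (only the low 16 bits strictly need to be unique):
	 *   bits  0..15: monotonically increasing counter
	 *   bits 16..27: 802.11 sequence number of the frame
	 *   bits 28..31: mac80211 queue id
	 */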
	req->packet_id = atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF;
	req->packet_id |= IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16;
	req->packet_id |= queue_id << 28;

	req->fc_offset = offset;
	/* Queue indexes are inverted between firmware and Linux */
	req->queue_id = 3 - queue_id;
	req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr);
	req->retry_policy_index = wfx_tx_get_retry_policy_id(wvif, tx_info);
	req->frame_format = wfx_tx_get_frame_format(tx_info);
	if (tx_info->driver_rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
		req->short_gi = 1;
	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
		req->after_dtim = 1;

	/* Auxiliary operations */
	wfx_tx_queues_put(wvif, skb);
	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
		schedule_work(&wvif->update_tim_work);
	wfx_bh_request_tx(wvif->wdev);
	return 0;
}

void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb)
{
	struct wfx_dev *wdev = hw->priv;
	struct wfx_vif *wvif;
	struct ieee80211_sta *sta = control ? control->sta : NULL;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	size_t driver_data_room = sizeof_field(struct ieee80211_tx_info, rate_driver_data);

	BUILD_BUG_ON_MSG(sizeof(struct wfx_tx_priv) > driver_data_room,
			 "struct tx_priv is too large");
	WARN(skb->next || skb->prev, "skb is already member of a list");
	/* control.vif can be NULL for injected frames */
	if (tx_info->control.vif)
		wvif = (struct wfx_vif *)tx_info->control.vif->drv_priv;
	else
		wvif = wvif_iterate(wdev, NULL);
	if (WARN_ON(!wvif))
		goto drop;
	/* Because of TX_AMPDU_SETUP_IN_HW, mac80211 does not try to send any BlockAck session
	 * management frame. The check below exists just in case.
	 */
	if (wfx_is_action_back(hdr)) {
		dev_info(wdev->dev, "drop BA action\n");
		goto drop;
	}
	if (wfx_tx_inner(wvif, sta, skb))
		goto drop;

	return;

drop:
	ieee80211_tx_status_irqsafe(wdev->hw, skb);
}

static void wfx_skb_dtor(struct wfx_vif *wvif, struct sk_buff *skb)
{
	struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data;
	struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body;
	unsigned int offset = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) +
			      req->fc_offset;

	if (!wvif) {
		pr_warn("vif associated with the skb does not exist anymore\n");
		return;
	}
	wfx_tx_policy_put(wvif, req->retry_policy_index);
	skb_pull(skb, offset);
	ieee80211_tx_status_irqsafe(wvif->wdev->hw, skb);
}

static void wfx_tx_fill_rates(struct wfx_dev *wdev, struct ieee80211_tx_info *tx_info,
			      const struct wfx_hif_cnf_tx *arg)
{
	struct ieee80211_tx_rate *rate;
	int tx_count;
	int i;

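	/* The confirmation does not tell which rates were actually used; it only reports the
	 * number of transmission attempts. Spread those attempts over the rate entries that were
	 * requested for this frame so mac80211 gets meaningful per-rate counts.
	 */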
	tx_count = arg->ack_failures;
	if (!arg->status || arg->ack_failures)
		tx_count += 1; /* Also report success */
	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		rate = &tx_info->status.rates[i];
		if (rate->idx < 0)
			break;
		if (tx_count < rate->count && arg->status == HIF_STATUS_TX_FAIL_RETRIES &&
		    arg->ack_failures)
			dev_dbg(wdev->dev, "all retries were not consumed: %d != %d\n",
				rate->count, tx_count);
		if (tx_count <= rate->count && tx_count &&
		    arg->txed_rate != wfx_get_hw_rate(wdev, rate))
			dev_dbg(wdev->dev, "inconsistent tx_info rates: %d != %d\n",
				arg->txed_rate, wfx_get_hw_rate(wdev, rate));
		if (tx_count > rate->count) {
			tx_count -= rate->count;
		} else if (!tx_count) {
			rate->count = 0;
			rate->idx = -1;
		} else {
			rate->count = tx_count;
			tx_count = 0;
		}
	}
	if (tx_count)
		dev_dbg(wdev->dev, "%d more retries than expected\n", tx_count);
}

void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg)
{
	const struct wfx_tx_priv *tx_priv;
	struct ieee80211_tx_info *tx_info;
	struct wfx_vif *wvif;
	struct sk_buff *skb;

	skb = wfx_pending_get(wdev, arg->packet_id);
	if (!skb) {
		dev_warn(wdev->dev, "received unknown packet_id (%#.8x) from chip\n",
			 arg->packet_id);
		return;
	}
	tx_info = IEEE80211_SKB_CB(skb);
	tx_priv = wfx_skb_tx_priv(skb);
	wvif = wdev_to_wvif(wdev, ((struct wfx_hif_msg *)skb->data)->interface);
	WARN_ON(!wvif);
	if (!wvif)
		return;

	/* Note that wfx_pending_get_pkt_us_delay() gets its data from tx_info */
	_trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wdev, skb));
	wfx_tx_fill_rates(wdev, tx_info, arg);
	skb_trim(skb, skb->len - tx_priv->icv_size);

	/* From now on, you can touch tx_info->status, but do not touch tx_priv anymore */
	/* FIXME: use ieee80211_tx_info_clear_status() */
	memset(tx_info->rate_driver_data, 0, sizeof(tx_info->rate_driver_data));
	memset(tx_info->pad, 0, sizeof(tx_info->pad));

	if (!arg->status) {
		tx_info->status.tx_time = le32_to_cpu(arg->media_delay) -
					  le32_to_cpu(arg->tx_queue_delay);
		if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
			tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			tx_info->flags |= IEEE80211_TX_STAT_ACK;
	} else if (arg->status == HIF_STATUS_TX_FAIL_REQUEUE) {
		WARN(!arg->requeue, "incoherent status and result_flags");
		if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
			wvif->after_dtim_tx_allowed = false; /* DTIM period elapsed */
			schedule_work(&wvif->update_tim_work);
		}
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	}
	wfx_skb_dtor(wvif, skb);
}

static void wfx_flush_vif(struct wfx_vif *wvif, u32 queues, struct sk_buff_head *dropped)
{
	struct wfx_queue *queue;
	int i;

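	/* First drop whatever is still queued for the requested ACs (if the caller asked for
	 * it), then wait for the remaining frames to be confirmed by the firmware, unless the
	 * chip is not responding anymore.
	 */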
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		if (!(BIT(i) & queues))
			continue;
		queue = &wvif->tx_queue[i];
		if (dropped)
			wfx_tx_queue_drop(wvif, queue, dropped);
	}
	if (wvif->wdev->chip_frozen)
		return;
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		if (!(BIT(i) & queues))
			continue;
		queue = &wvif->tx_queue[i];
		if (wait_event_timeout(wvif->wdev->tx_dequeue, wfx_tx_queue_empty(wvif, queue),
				       msecs_to_jiffies(1000)) <= 0)
			dev_warn(wvif->wdev->dev, "frames queued while flushing tx queues?");
	}
}

void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop)
{
	struct wfx_dev *wdev = hw->priv;
	struct sk_buff_head dropped;
	struct wfx_vif *wvif;
	struct wfx_hif_msg *hif;
	struct sk_buff *skb;

	skb_queue_head_init(&dropped);
	if (vif) {
		wvif = (struct wfx_vif *)vif->drv_priv;
		wfx_flush_vif(wvif, queues, drop ? &dropped : NULL);
	} else {
		wvif = NULL;
		while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
			wfx_flush_vif(wvif, queues, drop ? &dropped : NULL);
	}
	wfx_tx_flush(wdev);
	if (wdev->chip_frozen)
		wfx_pending_drop(wdev, &dropped);
	while ((skb = skb_dequeue(&dropped)) != NULL) {
		hif = (struct wfx_hif_msg *)skb->data;
		wvif = wdev_to_wvif(wdev, hif->interface);
		ieee80211_tx_info_clear_status(IEEE80211_SKB_CB(skb));
		wfx_skb_dtor(wvif, skb);
	}
}