// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>
#include <linux/bitfield.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

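/* Look up the sk_buff previously posted to the rx ring at the given DMA
 * address. Used in the full rx reorder (in-order indication) mode where
 * buffers are tracked by physical address in a hash table.
 */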
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

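/* Unmap and free every rx buffer currently owned by the ring and reset
 * the fill count and bookkeeping structures.
 */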
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}

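/* Allocate, DMA-map and post @num rx buffers starting at the current
 * alloc index, then publish the updated index for the firmware to read.
 */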
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system's CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring the FW
	 * will not report RX until the ring is refilled with enough buffers.
	 * This automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

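/* Pop the next buffer off the ring in software read order and unmap it.
 * Only used when the firmware does not operate in in-order rx mode.
 */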
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

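/* Pop a buffer by its DMA address (in-order rx mode): remove it from the
 * paddr hash table, unmap it and hand it back to the caller.
 */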
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

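/* Attach a chain of fragment buffers to the head skb and account the
 * fragment bytes in the head skb's total length.
 */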
static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
					       struct sk_buff *frag_list,
					       unsigned int frag_len)
{
	skb_shinfo(skb_head)->frag_list = frag_list;
	skb_head->data_len = frag_len;
	skb_head->len += skb_head->data_len;
}

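/* Reassemble a multi-buffer A-MSDU for monitor mode from 32-bit in-order
 * descriptors: pop each fragment by paddr, chain it onto the first MSDU
 * and advance *msdu_desc past the consumed descriptors.
 */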
static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
					     struct sk_buff *msdu,
					     struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	u32 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = (void *)msdu->data;
	trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

	skb_put(msdu, sizeof(struct htt_rx_desc));
	skb_pull(msdu, sizeof(struct htt_rx_desc));
	skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le32_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag) {
		ind_desc++;
		paddr = __le32_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}

static int
ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
				  struct sk_buff *msdu,
				  struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	u64 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = (void *)msdu->data;
	trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

	skb_put(msdu, sizeof(struct htt_rx_desc));
	skb_pull(msdu, sizeof(struct htt_rx_desc));
	skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le64_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag) {
		ind_desc++;
		paddr = __le64_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}

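/* Walk an in-order indication's 32-bit MSDU descriptor list, popping each
 * referenced buffer into @list and verifying that the hardware finished
 * writing it (MSDU_DONE).
 */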
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

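/* Allocate the rx ring: the netbuf pointer array, the DMA-coherent paddr
 * ring shared with the firmware and the shared alloc index, and set up
 * the refill retry timer and related software state.
 */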
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = ath10k_htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

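/* Per-cipher lengths of the security header (IV/PN), MIC and ICV, used
 * when stripping or re-inserting crypto parameters during undecap.
 */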
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}

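/* Decode PHY rate information (legacy/HT/VHT rate index, NSS, bandwidth,
 * short GI) from the PPDU start fields of the rx descriptor into the
 * mac80211 rx status.
 */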
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get the legacy rate index the band is required. Since
		 * the band can't be undefined, check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into the Rx
			 * descriptor so it's impossible to decode the MCS.
			 * Also, since the firmware consumes Group Id
			 * Management frames the host has no knowledge of the
			 * group/user position mapping, so it's impossible to
			 * pick the correct Nsts from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on a best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

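/* Determine the channel a frame was received on by falling back through
 * progressively less specific sources: scan channel, rx channel, peer,
 * vdev, any active channel context, then the target operating channel.
 */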
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

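/* Fill in per-PPDU rx status (signal, channel, rates, mactime and A-MPDU
 * flags) based on the first/last MPDU attention bits of the first MSDU's
 * rx descriptor.
 */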
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}

static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted,
					const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;
	bool msdu_limit_err;
	int bytes_aligned = ar->hw_params.decap_align_bytes;
	u8 *qos;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* Some hardware (QCA99x0 variants) limits the number of MSDUs it
	 * deaggregates from an A-MSDU so that unwanted MSDU deaggregation
	 * is avoided for error packets. If the limit is exceeded, the HW
	 * sends all remaining MSDUs as a single last MSDU with the msdu
	 * limit error set.
	 */
	msdu_limit_err = ath10k_rx_desc_msdu_limit_error(&ar->hw_params, rxd);

	/* If an MSDU limit error occurs then don't warn; a partial raw MSDU
	 * without the first MSDU is expected in that case and is handled
	 * further below.
	 */
	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* Push original 80211 header */
	if (unlikely(msdu_limit_err)) {
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}

		if (crypto_len)
			memcpy(skb_push(msdu, crypto_len),
			       (void *)hdr + round_up(hdr_len, bytes_aligned),
			       crypto_len);

		memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
	}

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

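/* Convert a native-wifi decapped frame back into a full 802.11 frame
 * using the original header saved from the first MSDU's rx descriptor.
 */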
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted, first_hdr);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr,
					    enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}

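/* Derive the skb checksum state from the hardware's IP and TCP/UDP
 * checksum verification bits in the rx descriptor.
 */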
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

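/* Process one MPDU's worth of MSDUs: derive encryption state and error
 * flags from the first and last rx descriptors, undecap each MSDU and
 * set up the corresponding mac80211 rx status flags.
 */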
ath10k_htt_rx_h_mpdu(struct ath10k * ar,struct sk_buff_head * amsdu,struct ieee80211_rx_status * status,bool fill_crypt_header,u8 * rx_hdr,enum ath10k_pkt_rx_err * err)1728 static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1729 struct sk_buff_head *amsdu,
1730 struct ieee80211_rx_status *status,
1731 bool fill_crypt_header,
1732 u8 *rx_hdr,
1733 enum ath10k_pkt_rx_err *err)
1734 {
1735 struct sk_buff *first;
1736 struct sk_buff *last;
1737 struct sk_buff *msdu;
1738 struct htt_rx_desc *rxd;
1739 struct ieee80211_hdr *hdr;
1740 enum htt_rx_mpdu_encrypt_type enctype;
1741 u8 first_hdr[64];
1742 u8 *qos;
1743 bool has_fcs_err;
1744 bool has_crypto_err;
1745 bool has_tkip_err;
1746 bool has_peer_idx_invalid;
1747 bool is_decrypted;
1748 bool is_mgmt;
1749 u32 attention;
1750
1751 if (skb_queue_empty(amsdu))
1752 return;
1753
1754 first = skb_peek(amsdu);
1755 rxd = (void *)first->data - sizeof(*rxd);
1756
1757 is_mgmt = !!(rxd->attention.flags &
1758 __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
1759
1760 enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
1761 RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1762
1763 /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
1764 * decapped header. It'll be used for undecapping of each MSDU.
1765 */
1766 hdr = (void *)rxd->rx_hdr_status;
1767 memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
1768
1769 if (rx_hdr)
1770 memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
1771
1772 /* Each A-MSDU subframe will use the original header as the base and be
1773 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1774 */
1775 hdr = (void *)first_hdr;
1776
1777 if (ieee80211_is_data_qos(hdr->frame_control)) {
1778 qos = ieee80211_get_qos_ctl(hdr);
1779 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1780 }
1781
1782 /* Some attention flags are valid only in the last MSDU. */
1783 last = skb_peek_tail(amsdu);
1784 rxd = (void *)last->data - sizeof(*rxd);
1785 attention = __le32_to_cpu(rxd->attention.flags);
1786
1787 has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
1788 has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
1789 has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
1790 has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
1791
1792 /* Note: If hardware captures an encrypted frame that it can't decrypt,
1793 * e.g. due to an FCS error, a missing peer or invalid key data, it will
1794 * report the frame as raw.
1795 */
1796 is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
1797 !has_fcs_err &&
1798 !has_crypto_err &&
1799 !has_peer_idx_invalid);
1800
1801 /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
1802 status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
1803 RX_FLAG_MMIC_ERROR |
1804 RX_FLAG_DECRYPTED |
1805 RX_FLAG_IV_STRIPPED |
1806 RX_FLAG_ONLY_MONITOR |
1807 RX_FLAG_MMIC_STRIPPED);
1808
1809 if (has_fcs_err)
1810 status->flag |= RX_FLAG_FAILED_FCS_CRC;
1811
1812 if (has_tkip_err)
1813 status->flag |= RX_FLAG_MMIC_ERROR;
1814
1815 if (err) {
1816 if (has_fcs_err)
1817 *err = ATH10K_PKT_RX_ERR_FCS;
1818 else if (has_tkip_err)
1819 *err = ATH10K_PKT_RX_ERR_TKIP;
1820 else if (has_crypto_err)
1821 *err = ATH10K_PKT_RX_ERR_CRYPT;
1822 else if (has_peer_idx_invalid)
1823 *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
1824 }
1825
1826 /* Firmware reports all necessary management frames via WMI already.
1827 * They are not reported to monitor interfaces at all so pass the ones
1828 * coming via HTT to monitor interfaces instead. This simplifies
1829 * matters a lot.
1830 */
1831 if (is_mgmt)
1832 status->flag |= RX_FLAG_ONLY_MONITOR;
1833
1834 if (is_decrypted) {
1835 status->flag |= RX_FLAG_DECRYPTED;
1836
1837 if (likely(!is_mgmt))
1838 status->flag |= RX_FLAG_MMIC_STRIPPED;
1839
1840 if (fill_crypt_header)
1841 status->flag |= RX_FLAG_MIC_STRIPPED |
1842 RX_FLAG_ICV_STRIPPED;
1843 else
1844 status->flag |= RX_FLAG_IV_STRIPPED;
1845 }
1846
1847 skb_queue_walk(amsdu, msdu) {
1848 ath10k_htt_rx_h_csum_offload(msdu);
1849 ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
1850 is_decrypted);
1851
1852 /* Undecapping involves copying the original 802.11 header back
1853 * to sk_buff. If frame is protected and hardware has decrypted
1854 * it then remove the protected bit.
1855 */
1856 if (!is_decrypted)
1857 continue;
1858 if (is_mgmt)
1859 continue;
1860
1861 if (fill_crypt_header)
1862 continue;
1863
1864 hdr = (void *)msdu->data;
1865 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1866 }
1867 }
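/* Editor's worked example: for a decrypted, non-management data MPDU
 * processed with fill_crypt_header == false, the code above leaves
 * status->flag with RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED |
 * RX_FLAG_IV_STRIPPED set and clears IEEE80211_FCTL_PROTECTED in each
 * MSDU's rebuilt 802.11 header, so mac80211 accepts the frames without
 * attempting software decryption.
 */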
1868
1869 static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
1870 struct sk_buff_head *amsdu,
1871 struct ieee80211_rx_status *status)
1872 {
1873 struct sk_buff *msdu;
1874 struct sk_buff *first_subframe;
1875
1876 first_subframe = skb_peek(amsdu);
1877
1878 while ((msdu = __skb_dequeue(amsdu))) {
1879 /* Setup per-MSDU flags */
1880 if (skb_queue_empty(amsdu))
1881 status->flag &= ~RX_FLAG_AMSDU_MORE;
1882 else
1883 status->flag |= RX_FLAG_AMSDU_MORE;
1884
1885 if (msdu == first_subframe) {
1886 first_subframe = NULL;
1887 status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
1888 } else {
1889 status->flag |= RX_FLAG_ALLOW_SAME_PN;
1890 }
1891
1892 ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
1893 }
1894 }
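/* Editor's example: for a three-subframe A-MSDU the loop above delivers
 * the subframes with RX_FLAG_AMSDU_MORE set on the first two and cleared
 * on the last, while RX_FLAG_ALLOW_SAME_PN is set on every subframe
 * except the first, so that mac80211's replay detection tolerates the
 * PN shared by all subframes of one MPDU.
 */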
1895
1896 static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
1897 unsigned long *unchain_cnt)
1898 {
1899 struct sk_buff *skb, *first;
1900 int space;
1901 int total_len = 0;
1902 int amsdu_len = skb_queue_len(amsdu);
1903
1904 /* TODO: It might be possible to optimize this by using
1905 * skb_try_coalesce() or a similar method to
1906 * reduce copying, or by getting mac80211 to
1907 * provide a way to receive a list of
1908 * skbs.
1909 */
1910
1911 first = __skb_dequeue(amsdu);
1912
1913 /* Allocate total length all at once. */
1914 skb_queue_walk(amsdu, skb)
1915 total_len += skb->len;
1916
1917 space = total_len - skb_tailroom(first);
1918 if ((space > 0) &&
1919 (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
1920 /* TODO: bump some rx-oom error stat */
1921 /* put it back together so we can free the
1922 * whole list at once.
1923 */
1924 __skb_queue_head(amsdu, first);
1925 return -1;
1926 }
1927
1928 /* Walk list again, copying contents into
1929 * msdu_head
1930 */
1931 while ((skb = __skb_dequeue(amsdu))) {
1932 skb_copy_from_linear_data(skb, skb_put(first, skb->len),
1933 skb->len);
1934 dev_kfree_skb_any(skb);
1935 }
1936
1937 __skb_queue_head(amsdu, first);
1938
1939 *unchain_cnt += amsdu_len - 1;
1940
1941 return 0;
1942 }
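/* Editor's example (hypothetical sizes): for a chain of 1600 + 1600 + 400
 * byte buffers with 200 bytes of tailroom in the first skb, the walk above
 * computes total_len = 2000 for the two trailing fragments, requests
 * space = 2000 - 200 = 1800 extra bytes via pskb_expand_head(), and then
 * appends both fragments with skb_put()/skb_copy_from_linear_data(),
 * bumping *unchain_cnt by amsdu_len - 1 = 2.
 */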
1943
1944 static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
1945 struct sk_buff_head *amsdu,
1946 unsigned long *drop_cnt,
1947 unsigned long *unchain_cnt)
1948 {
1949 struct sk_buff *first;
1950 struct htt_rx_desc *rxd;
1951 enum rx_msdu_decap_format decap;
1952
1953 first = skb_peek(amsdu);
1954 rxd = (void *)first->data - sizeof(*rxd);
1955 decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
1956 RX_MSDU_START_INFO1_DECAP_FORMAT);
1957
1958 /* FIXME: Current unchaining logic can only handle simple case of raw
1959 * msdu chaining. If decapping is other than raw the chaining may be
1960 * more complex and this isn't handled by the current code. Don't even
1961 * try re-constructing such frames - it'll be pretty much garbage.
1962 */
1963 if (decap != RX_MSDU_DECAP_RAW ||
1964 skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
1965 *drop_cnt += skb_queue_len(amsdu);
1966 __skb_queue_purge(amsdu);
1967 return;
1968 }
1969
1970 ath10k_unchain_msdu(amsdu, unchain_cnt);
1971 }
1972
1973 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1974 struct sk_buff_head *amsdu,
1975 struct ieee80211_rx_status *rx_status)
1976 {
1977 /* FIXME: It might be a good idea to do some fuzzy-testing to drop
1978 * invalid/dangerous frames.
1979 */
1980
1981 if (!rx_status->freq) {
1982 ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
1983 return false;
1984 }
1985
1986 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
1987 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
1988 return false;
1989 }
1990
1991 return true;
1992 }
1993
1994 static void ath10k_htt_rx_h_filter(struct ath10k *ar,
1995 struct sk_buff_head *amsdu,
1996 struct ieee80211_rx_status *rx_status,
1997 unsigned long *drop_cnt)
1998 {
1999 if (skb_queue_empty(amsdu))
2000 return;
2001
2002 if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
2003 return;
2004
2005 if (drop_cnt)
2006 *drop_cnt += skb_queue_len(amsdu);
2007
2008 __skb_queue_purge(amsdu);
2009 }
2010
2011 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
2012 {
2013 struct ath10k *ar = htt->ar;
2014 struct ieee80211_rx_status *rx_status = &htt->rx_status;
2015 struct sk_buff_head amsdu;
2016 int ret;
2017 unsigned long drop_cnt = 0;
2018 unsigned long unchain_cnt = 0;
2019 unsigned long drop_cnt_filter = 0;
2020 unsigned long msdus_to_queue, num_msdus;
2021 enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
2022 u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
2023
2024 __skb_queue_head_init(&amsdu);
2025
2026 spin_lock_bh(&htt->rx_ring.lock);
2027 if (htt->rx_confused) {
2028 spin_unlock_bh(&htt->rx_ring.lock);
2029 return -EIO;
2030 }
2031 ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
2032 spin_unlock_bh(&htt->rx_ring.lock);
2033
2034 if (ret < 0) {
2035 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
2036 __skb_queue_purge(&amsdu);
2037 /* FIXME: It's probably a good idea to reboot the
2038 * device instead of leaving it inoperable.
2039 */
2040 htt->rx_confused = true;
2041 return ret;
2042 }
2043
2044 num_msdus = skb_queue_len(&amsdu);
2045
2046 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
2047
2048 /* ret > 0 indicates chained MSDUs */
2049 if (ret > 0)
2050 ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
2051
2052 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
2053 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
2054 msdus_to_queue = skb_queue_len(&amsdu);
2055 ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
2056
2057 ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
2058 unchain_cnt, drop_cnt, drop_cnt_filter,
2059 msdus_to_queue);
2060
2061 return 0;
2062 }
2063
2064 static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
2065 union htt_rx_pn_t *pn,
2066 int pn_len_bits)
2067 {
2068 switch (pn_len_bits) {
2069 case 48:
2070 pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
2071 ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
2072 break;
2073 case 24:
2074 pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
2075 break;
2076 }
2077 }
2078
2079 static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
2080 union htt_rx_pn_t *old_pn)
2081 {
2082 return ((new_pn->pn48 & 0xffffffffffffULL) <=
2083 (old_pn->pn48 & 0xffffffffffffULL));
2084 }
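/* Editor's example: the PN must be strictly increasing, so with
 * old_pn->pn48 == 0x100 the comparison above flags new_pn->pn48 == 0x100
 * (replay) and 0xff (reorder) as invalid, while 0x101 passes. The
 * 0xffffffffffff mask confines the comparison to the 48 PN bits and keeps
 * the unused top 16 bits of the u64 from influencing the result.
 */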
2085
2086 static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
2087 struct ath10k_peer *peer,
2088 struct htt_rx_indication_hl *rx)
2089 {
2090 bool last_pn_valid, pn_invalid = false;
2091 enum htt_txrx_sec_cast_type sec_index;
2092 enum htt_security_types sec_type;
2093 union htt_rx_pn_t new_pn = {0};
2094 struct htt_hl_rx_desc *rx_desc;
2095 union htt_rx_pn_t *last_pn;
2096 u32 rx_desc_info, tid;
2097 int num_mpdu_ranges;
2098
2099 lockdep_assert_held(&ar->data_lock);
2100
2101 if (!peer)
2102 return false;
2103
2104 if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
2105 return false;
2106
2107 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2108 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2109
2110 rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
2111 rx_desc_info = __le32_to_cpu(rx_desc->info);
2112
2113 if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
2114 return false;
2115
2116 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2117 last_pn_valid = peer->tids_last_pn_valid[tid];
2118 last_pn = &peer->tids_last_pn[tid];
2119
2120 if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
2121 sec_index = HTT_TXRX_SEC_MCAST;
2122 else
2123 sec_index = HTT_TXRX_SEC_UCAST;
2124
2125 sec_type = peer->rx_pn[sec_index].sec_type;
2126 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2127
2128 if (sec_type != HTT_SECURITY_AES_CCMP &&
2129 sec_type != HTT_SECURITY_TKIP &&
2130 sec_type != HTT_SECURITY_TKIP_NOMIC)
2131 return false;
2132
2133 if (last_pn_valid)
2134 pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
2135 else
2136 peer->tids_last_pn_valid[tid] = 1;
2137
2138 if (!pn_invalid)
2139 last_pn->pn48 = new_pn.pn48;
2140
2141 return pn_invalid;
2142 }
2143
2144 static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
2145 struct htt_rx_indication_hl *rx,
2146 struct sk_buff *skb,
2147 enum htt_rx_pn_check_type check_pn_type,
2148 enum htt_rx_tkip_demic_type tkip_mic_type)
2149 {
2150 struct ath10k *ar = htt->ar;
2151 struct ath10k_peer *peer;
2152 struct htt_rx_indication_mpdu_range *mpdu_ranges;
2153 struct fw_rx_desc_hl *fw_desc;
2154 enum htt_txrx_sec_cast_type sec_index;
2155 enum htt_security_types sec_type;
2156 union htt_rx_pn_t new_pn = {0};
2157 struct htt_hl_rx_desc *rx_desc;
2158 struct ieee80211_hdr *hdr;
2159 struct ieee80211_rx_status *rx_status;
2160 u16 peer_id;
2161 u8 rx_desc_len;
2162 int num_mpdu_ranges;
2163 size_t tot_hdr_len;
2164 struct ieee80211_channel *ch;
2165 bool pn_invalid, qos, first_msdu;
2166 u32 tid, rx_desc_info;
2167
2168 peer_id = __le16_to_cpu(rx->hdr.peer_id);
2169 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2170
2171 spin_lock_bh(&ar->data_lock);
2172 peer = ath10k_peer_find_by_id(ar, peer_id);
2173 spin_unlock_bh(&ar->data_lock);
2174 if (!peer && peer_id != HTT_INVALID_PEERID)
2175 ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
2176
2177 if (!peer)
2178 return true;
2179
2180 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2181 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2182 mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
2183 fw_desc = &rx->fw_desc;
2184 rx_desc_len = fw_desc->len;
2185
2186 /* I have not yet seen any case where num_mpdu_ranges > 1.
2187 * qcacld does not seem to handle that case either, so we introduce the
2188 * same limitation here as well.
2189 */
2190 if (num_mpdu_ranges > 1)
2191 ath10k_warn(ar,
2192 "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
2193 num_mpdu_ranges);
2194
2195 if (mpdu_ranges->mpdu_range_status !=
2196 HTT_RX_IND_MPDU_STATUS_OK &&
2197 mpdu_ranges->mpdu_range_status !=
2198 HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
2199 ath10k_warn(ar, "MPDU range status: %d\n",
2200 mpdu_ranges->mpdu_range_status);
2201 goto err;
2202 }
2203
2204 rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
2205 rx_desc_info = __le32_to_cpu(rx_desc->info);
2206
2207 if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
2208 sec_index = HTT_TXRX_SEC_MCAST;
2209 else
2210 sec_index = HTT_TXRX_SEC_UCAST;
2211
2212 sec_type = peer->rx_pn[sec_index].sec_type;
2213 first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;
2214
2215 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2216
2217 if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
2218 spin_lock_bh(&ar->data_lock);
2219 pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
2220 spin_unlock_bh(&ar->data_lock);
2221
2222 if (pn_invalid)
2223 goto err;
2224 }
2225
2226 /* Strip off all headers preceding the MAC header before delivering
2227 * the frame to mac80211
2228 */
2229 tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
2230 sizeof(rx->ppdu) + sizeof(rx->prefix) +
2231 sizeof(rx->fw_desc) +
2232 sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
2233
2234 skb_pull(skb, tot_hdr_len);
2235
2236 hdr = (struct ieee80211_hdr *)skb->data;
2237 qos = ieee80211_is_data_qos(hdr->frame_control);
2238 rx_status = IEEE80211_SKB_RXCB(skb);
2239 rx_status->chains |= BIT(0);
2240 if (rx->ppdu.combined_rssi == 0) {
2241 /* SDIO firmware does not provide signal */
2242 rx_status->signal = 0;
2243 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2244 } else {
2245 rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
2246 rx->ppdu.combined_rssi;
2247 rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
2248 }
2249
2250 spin_lock_bh(&ar->data_lock);
2251 ch = ar->scan_channel;
2252 if (!ch)
2253 ch = ar->rx_channel;
2254 if (!ch)
2255 ch = ath10k_htt_rx_h_any_channel(ar);
2256 if (!ch)
2257 ch = ar->tgt_oper_chan;
2258 spin_unlock_bh(&ar->data_lock);
2259
2260 if (ch) {
2261 rx_status->band = ch->band;
2262 rx_status->freq = ch->center_freq;
2263 }
2264 if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
2265 rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
2266 else
2267 rx_status->flag |= RX_FLAG_AMSDU_MORE;
2268
2269 /* Not entirely sure about this, but all frames from the chipset have
2270 * the protected flag set even though they have already been decrypted.
2271 * Clearing this flag is necessary in order for mac80211 not to drop
2272 * the frame.
2273 * TODO: Verify this is always the case or find out a way to check
2274 * if there has been hw decryption.
2275 */
2276 if (ieee80211_has_protected(hdr->frame_control)) {
2277 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2278 rx_status->flag |= RX_FLAG_DECRYPTED |
2279 RX_FLAG_IV_STRIPPED |
2280 RX_FLAG_MMIC_STRIPPED;
2281
2282 if (tid < IEEE80211_NUM_TIDS &&
2283 first_msdu &&
2284 check_pn_type == HTT_RX_PN_CHECK &&
2285 (sec_type == HTT_SECURITY_AES_CCMP ||
2286 sec_type == HTT_SECURITY_TKIP ||
2287 sec_type == HTT_SECURITY_TKIP_NOMIC)) {
2288 u8 offset, *ivp, i;
2289 s8 keyidx = 0;
2290 __le64 pn48 = cpu_to_le64(new_pn.pn48);
2291
2292 hdr = (struct ieee80211_hdr *)skb->data;
2293 offset = ieee80211_hdrlen(hdr->frame_control);
2294 hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2295 rx_status->flag &= ~RX_FLAG_IV_STRIPPED;
2296
2297 memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
2298 skb->data, offset);
2299 skb_push(skb, IEEE80211_CCMP_HDR_LEN);
2300 ivp = skb->data + offset;
2301 memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
2302 /* Ext IV */
2303 ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;
2304
2305 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
2306 if (peer->keys[i] &&
2307 peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
2308 keyidx = peer->keys[i]->keyidx;
2309 }
2310
2311 /* Key ID */
2312 ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;
2313
2314 if (sec_type == HTT_SECURITY_AES_CCMP) {
2315 rx_status->flag |= RX_FLAG_MIC_STRIPPED;
2316 /* pn 0, pn 1 */
2317 memcpy(skb->data + offset, &pn48, 2);
2318 /* pn 2, pn 3, pn 4, pn 5 */
2319 memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
2320 } else {
2321 rx_status->flag |= RX_FLAG_ICV_STRIPPED;
2322 /* TSC 0 */
2323 memcpy(skb->data + offset + 2, &pn48, 1);
2324 /* TSC 1 */
2325 memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
2326 /* TSC 2, TSC 3, TSC 4, TSC 5 */
2327 memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
2328 }
2329 }
2330 }
2331
2332 if (tkip_mic_type == HTT_RX_TKIP_MIC)
2333 rx_status->flag &= ~RX_FLAG_IV_STRIPPED &
2334 ~RX_FLAG_MMIC_STRIPPED;
2335
2336 if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
2337 rx_status->flag |= RX_FLAG_MMIC_ERROR;
2338
2339 if (!qos && tid < IEEE80211_NUM_TIDS) {
2340 u8 offset;
2341 __le16 qos_ctrl = 0;
2342
2343 hdr = (struct ieee80211_hdr *)skb->data;
2344 offset = ieee80211_hdrlen(hdr->frame_control);
2345
2346 hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
2347 memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
2348 skb_push(skb, IEEE80211_QOS_CTL_LEN);
2349 qos_ctrl = cpu_to_le16(tid);
2350 memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
2351 }
2352
2353 ieee80211_rx_ni(ar->hw, skb);
2354
2355 /* We have delivered the skb to the upper layers (mac80211) so we
2356 * must not free it.
2357 */
2358 return false;
2359 err:
2360 /* Tell the caller that it must free the skb since we have not
2361 * consumed it
2362 */
2363 return true;
2364 }
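/* Editor's sketch of the CCMP header rebuilt above (offsets relative to
 * ivp, assuming the usual CCMP layout):
 *
 *   ivp[0] = PN0, ivp[1] = PN1, ivp[2] = reserved,
 *   ivp[3] = ExtIV | (keyidx << 6), ivp[4..7] = PN2..PN5
 *
 * which matches the two memcpy() calls: two pn48 bytes at offset 0 and
 * four bytes (PN2..PN5) at offset 4, with byte 3 populated separately by
 * the ExtIV and key-index writes.
 */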
2365
2366 static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
2367 u16 head_len,
2368 u16 hdr_len)
2369 {
2370 u8 *ivp, *orig_hdr;
2371
2372 orig_hdr = skb->data;
2373 ivp = orig_hdr + hdr_len + head_len;
2374
2375 /* the ExtIV bit is always set to 1 for TKIP */
2376 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2377 return -EINVAL;
2378
2379 memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
2380 skb_pull(skb, IEEE80211_TKIP_IV_LEN);
2381 skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
2382 return 0;
2383 }
2384
2385 static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
2386 u16 head_len,
2387 u16 hdr_len)
2388 {
2389 u8 *ivp, *orig_hdr;
2390
2391 orig_hdr = skb->data;
2392 ivp = orig_hdr + hdr_len + head_len;
2393
2394 /* the ExtIV bit is always set to 1 for TKIP */
2395 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2396 return -EINVAL;
2397
2398 memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
2399 skb_pull(skb, IEEE80211_TKIP_IV_LEN);
2400 skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
2401 return 0;
2402 }
2403
2404 static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
2405 u16 head_len,
2406 u16 hdr_len)
2407 {
2408 u8 *ivp, *orig_hdr;
2409
2410 orig_hdr = skb->data;
2411 ivp = orig_hdr + hdr_len + head_len;
2412
2413 /* the ExtIV bit is always set to 1 for CCMP */
2414 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2415 return -EINVAL;
2416
2417 skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
2418 memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
2419 skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
2420 return 0;
2421 }
2422
2423 static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
2424 u16 head_len,
2425 u16 hdr_len)
2426 {
2427 u8 *orig_hdr;
2428
2429 orig_hdr = skb->data;
2430
2431 memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
2432 orig_hdr, head_len + hdr_len);
2433 skb_pull(skb, IEEE80211_WEP_IV_LEN);
2434 skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
2435 return 0;
2436 }
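/* Editor's sketch: the four *_decap helpers above share one move pattern.
 * For CCMP, with H = head_len and L = hdr_len:
 *
 *   before: | H + L header bytes | CCMP hdr (8) | payload | MIC (8) |
 *   after:  skb_trim() drops the trailing MIC, memmove() shifts the
 *           H + L header bytes right over the crypto header, and
 *           skb_pull() drops the duplicated front, leaving
 *           | H + L header bytes | payload |
 *
 * TKIP and WEP differ only in the IV/ICV/MIC lengths and in the order of
 * the trim relative to the move.
 */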
2437
2438 static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
2439 struct htt_rx_fragment_indication *rx,
2440 struct sk_buff *skb)
2441 {
2442 struct ath10k *ar = htt->ar;
2443 enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
2444 enum htt_txrx_sec_cast_type sec_index;
2445 struct htt_rx_indication_hl *rx_hl;
2446 enum htt_security_types sec_type;
2447 u32 tid, frag, seq, rx_desc_info;
2448 union htt_rx_pn_t new_pn = {0};
2449 struct htt_hl_rx_desc *rx_desc;
2450 u16 peer_id, sc, hdr_space;
2451 union htt_rx_pn_t *last_pn;
2452 struct ieee80211_hdr *hdr;
2453 int ret, num_mpdu_ranges;
2454 struct ath10k_peer *peer;
2455 struct htt_resp *resp;
2456 size_t tot_hdr_len;
2457
2458 resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
2459 skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
2460 skb_trim(skb, skb->len - FCS_LEN);
2461
2462 peer_id = __le16_to_cpu(rx->peer_id);
2463 rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);
2464
2465 spin_lock_bh(&ar->data_lock);
2466 peer = ath10k_peer_find_by_id(ar, peer_id);
2467 if (!peer) {
2468 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
2469 goto err;
2470 }
2471
2472 num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
2473 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2474
2475 tot_hdr_len = sizeof(struct htt_resp_hdr) +
2476 sizeof(rx_hl->hdr) +
2477 sizeof(rx_hl->ppdu) +
2478 sizeof(rx_hl->prefix) +
2479 sizeof(rx_hl->fw_desc) +
2480 sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;
2481
2482 tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2483 rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
2484 rx_desc_info = __le32_to_cpu(rx_desc->info);
2485
2486 if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
2487 spin_unlock_bh(&ar->data_lock);
2488 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2489 HTT_RX_NON_PN_CHECK,
2490 HTT_RX_NON_TKIP_MIC);
2491 }
2492
2493 hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
2494
2495 if (ieee80211_has_retry(hdr->frame_control))
2496 goto err;
2497
2498 hdr_space = ieee80211_hdrlen(hdr->frame_control);
2499 sc = __le16_to_cpu(hdr->seq_ctrl);
2500 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2501 frag = sc & IEEE80211_SCTL_FRAG;
2502
2503 sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
2504 HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
2505 sec_type = peer->rx_pn[sec_index].sec_type;
2506 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2507
2508 switch (sec_type) {
2509 case HTT_SECURITY_TKIP:
2510 tkip_mic = HTT_RX_TKIP_MIC;
2511 ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
2512 tot_hdr_len +
2513 rx_hl->fw_desc.len,
2514 hdr_space);
2515 if (ret)
2516 goto err;
2517 break;
2518 case HTT_SECURITY_TKIP_NOMIC:
2519 ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
2520 tot_hdr_len +
2521 rx_hl->fw_desc.len,
2522 hdr_space);
2523 if (ret)
2524 goto err;
2525 break;
2526 case HTT_SECURITY_AES_CCMP:
2527 ret = ath10k_htt_rx_frag_ccmp_decap(skb,
2528 tot_hdr_len + rx_hl->fw_desc.len,
2529 hdr_space);
2530 if (ret)
2531 goto err;
2532 break;
2533 case HTT_SECURITY_WEP128:
2534 case HTT_SECURITY_WEP104:
2535 case HTT_SECURITY_WEP40:
2536 ret = ath10k_htt_rx_frag_wep_decap(skb,
2537 tot_hdr_len + rx_hl->fw_desc.len,
2538 hdr_space);
2539 if (ret)
2540 goto err;
2541 break;
2542 default:
2543 break;
2544 }
2545
2546 resp = (struct htt_resp *)(skb->data);
2547
2548 if (sec_type != HTT_SECURITY_AES_CCMP &&
2549 sec_type != HTT_SECURITY_TKIP &&
2550 sec_type != HTT_SECURITY_TKIP_NOMIC) {
2551 spin_unlock_bh(&ar->data_lock);
2552 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2553 HTT_RX_NON_PN_CHECK,
2554 HTT_RX_NON_TKIP_MIC);
2555 }
2556
2557 last_pn = &peer->frag_tids_last_pn[tid];
2558
2559 if (frag == 0) {
2560 if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
2561 goto err;
2562
2563 last_pn->pn48 = new_pn.pn48;
2564 peer->frag_tids_seq[tid] = seq;
2565 } else if (sec_type == HTT_SECURITY_AES_CCMP) {
2566 if (seq != peer->frag_tids_seq[tid])
2567 goto err;
2568
2569 if (new_pn.pn48 != last_pn->pn48 + 1)
2570 goto err;
2571
2572 last_pn->pn48 = new_pn.pn48;
2573 last_pn = &peer->tids_last_pn[tid];
2574 last_pn->pn48 = new_pn.pn48;
2575 }
2576
2577 spin_unlock_bh(&ar->data_lock);
2578
2579 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2580 HTT_RX_NON_PN_CHECK, tkip_mic);
2581
2582 err:
2583 spin_unlock_bh(&ar->data_lock);
2584
2585 /* Tell the caller that it must free the skb since we have not
2586 * consumed it
2587 */
2588 return true;
2589 }
2590
2591 static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
2592 struct htt_rx_indication *rx)
2593 {
2594 struct ath10k *ar = htt->ar;
2595 struct htt_rx_indication_mpdu_range *mpdu_ranges;
2596 int num_mpdu_ranges;
2597 int i, mpdu_count = 0;
2598 u16 peer_id;
2599 u8 tid;
2600
2601 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2602 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2603 peer_id = __le16_to_cpu(rx->hdr.peer_id);
2604 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2605
2606 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
2607
2608 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
2609 rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));
2610
2611 for (i = 0; i < num_mpdu_ranges; i++)
2612 mpdu_count += mpdu_ranges[i].mpdu_count;
2613
2614 atomic_add(mpdu_count, &htt->num_mpdus_ready);
2615
2616 ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
2617 num_mpdu_ranges);
2618 }
2619
2620 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
2621 struct sk_buff *skb)
2622 {
2623 struct ath10k_htt *htt = &ar->htt;
2624 struct htt_resp *resp = (struct htt_resp *)skb->data;
2625 struct htt_tx_done tx_done = {};
2626 int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
2627 __le16 msdu_id, *msdus;
2628 bool rssi_enabled = false;
2629 u8 msdu_count = 0, num_airtime_records, tid;
2630 int i, htt_pad = 0;
2631 struct htt_data_tx_compl_ppdu_dur *ppdu_info;
2632 struct ath10k_peer *peer;
2633 u16 ppdu_info_offset = 0, peer_id;
2634 u32 tx_duration;
2635
2636 switch (status) {
2637 case HTT_DATA_TX_STATUS_NO_ACK:
2638 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
2639 break;
2640 case HTT_DATA_TX_STATUS_OK:
2641 tx_done.status = HTT_TX_COMPL_STATE_ACK;
2642 break;
2643 case HTT_DATA_TX_STATUS_DISCARD:
2644 case HTT_DATA_TX_STATUS_POSTPONE:
2645 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
2646 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2647 break;
2648 default:
2649 ath10k_warn(ar, "unhandled tx completion status %d\n", status);
2650 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2651 break;
2652 }
2653
2654 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
2655 resp->data_tx_completion.num_msdus);
2656
2657 msdu_count = resp->data_tx_completion.num_msdus;
2658 msdus = resp->data_tx_completion.msdus;
2659 rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);
2660
2661 if (rssi_enabled)
2662 htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
2663 resp);
2664
2665 for (i = 0; i < msdu_count; i++) {
2666 msdu_id = msdus[i];
2667 tx_done.msdu_id = __le16_to_cpu(msdu_id);
2668
2669 if (rssi_enabled) {
2670 /* The total number of MSDUs should be even;
2671 * if an odd number of MSDUs is sent, firmware fills
2672 * the last msdu id with 0xffff
2673 */
2674 if (msdu_count & 0x01) {
2675 msdu_id = msdus[msdu_count + i + 1 + htt_pad];
2676 tx_done.ack_rssi = __le16_to_cpu(msdu_id);
2677 } else {
2678 msdu_id = msdus[msdu_count + i + htt_pad];
2679 tx_done.ack_rssi = __le16_to_cpu(msdu_id);
2680 }
2681 }
2682
2683 /* kfifo_put: In practice firmware shouldn't fire off per-CE
2684 * interrupt and main interrupt (MSI/-X range case) for the same
2685 * HTC service so it should be safe to use kfifo_put w/o lock.
2686 *
2687 * From kfifo_put() documentation:
2688 * Note that with only one concurrent reader and one concurrent
2689 * writer, you don't need extra locking to use these macro.
2690 */
2691 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
2692 ath10k_txrx_tx_unref(htt, &tx_done);
2693 } else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
2694 ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
2695 tx_done.msdu_id, tx_done.status);
2696 ath10k_txrx_tx_unref(htt, &tx_done);
2697 }
2698 }
2699
2700 if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
2701 return;
2702
2703 ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;
2704
2705 if (rssi_enabled)
2706 ppdu_info_offset += ppdu_info_offset;
2707
2708 if (resp->data_tx_completion.flags2 &
2709 (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
2710 ppdu_info_offset += 2;
2711
2712 ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
2713 num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
2714 __le32_to_cpu(ppdu_info->info0));
2715
2716 for (i = 0; i < num_airtime_records; i++) {
2717 struct htt_data_tx_ppdu_dur *ppdu_dur;
2718 u32 info0;
2719
2720 ppdu_dur = &ppdu_info->ppdu_dur[i];
2721 info0 = __le32_to_cpu(ppdu_dur->info0);
2722
2723 peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK,
2724 info0);
2725 rcu_read_lock();
2726 spin_lock_bh(&ar->data_lock);
2727
2728 peer = ath10k_peer_find_by_id(ar, peer_id);
2729 if (!peer) {
2730 spin_unlock_bh(&ar->data_lock);
2731 rcu_read_unlock();
2732 continue;
2733 }
2734
2735 tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0);
2736 tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);
2737
2738 ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);
2739
2740 spin_unlock_bh(&ar->data_lock);
2741 rcu_read_unlock();
2742 }
2743 }
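/* Editor's sketch of the completion payload layout assumed above when
 * RSSI reporting is enabled (N = msdu_count):
 *
 *   msdus[0..N-1]                       MSDU ids
 *   msdus[N (+1 if N is odd) + pad...]  per-MSDU ack RSSI, same order
 *
 * so msdus[msdu_count + i + htt_pad] (plus one extra slot when the
 * firmware padded an odd count with 0xffff) yields the RSSI belonging
 * to MSDU i.
 */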
2744
2745 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
2746 {
2747 struct htt_rx_addba *ev = &resp->rx_addba;
2748 struct ath10k_peer *peer;
2749 struct ath10k_vif *arvif;
2750 u16 info0, tid, peer_id;
2751
2752 info0 = __le16_to_cpu(ev->info0);
2753 tid = MS(info0, HTT_RX_BA_INFO0_TID);
2754 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
2755
2756 ath10k_dbg(ar, ATH10K_DBG_HTT,
2757 "htt rx addba tid %hu peer_id %hu size %hhu\n",
2758 tid, peer_id, ev->window_size);
2759
2760 spin_lock_bh(&ar->data_lock);
2761 peer = ath10k_peer_find_by_id(ar, peer_id);
2762 if (!peer) {
2763 ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
2764 peer_id);
2765 spin_unlock_bh(&ar->data_lock);
2766 return;
2767 }
2768
2769 arvif = ath10k_get_arvif(ar, peer->vdev_id);
2770 if (!arvif) {
2771 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
2772 peer->vdev_id);
2773 spin_unlock_bh(&ar->data_lock);
2774 return;
2775 }
2776
2777 ath10k_dbg(ar, ATH10K_DBG_HTT,
2778 "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
2779 peer->addr, tid, ev->window_size);
2780
2781 ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
2782 spin_unlock_bh(&ar->data_lock);
2783 }
2784
2785 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
2786 {
2787 struct htt_rx_delba *ev = &resp->rx_delba;
2788 struct ath10k_peer *peer;
2789 struct ath10k_vif *arvif;
2790 u16 info0, tid, peer_id;
2791
2792 info0 = __le16_to_cpu(ev->info0);
2793 tid = MS(info0, HTT_RX_BA_INFO0_TID);
2794 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
2795
2796 ath10k_dbg(ar, ATH10K_DBG_HTT,
2797 "htt rx delba tid %hu peer_id %hu\n",
2798 tid, peer_id);
2799
2800 spin_lock_bh(&ar->data_lock);
2801 peer = ath10k_peer_find_by_id(ar, peer_id);
2802 if (!peer) {
2803 ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
2804 peer_id);
2805 spin_unlock_bh(&ar->data_lock);
2806 return;
2807 }
2808
2809 arvif = ath10k_get_arvif(ar, peer->vdev_id);
2810 if (!arvif) {
2811 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
2812 peer->vdev_id);
2813 spin_unlock_bh(&ar->data_lock);
2814 return;
2815 }
2816
2817 ath10k_dbg(ar, ATH10K_DBG_HTT,
2818 "htt rx stop rx ba session sta %pM tid %hu\n",
2819 peer->addr, tid);
2820
2821 ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
2822 spin_unlock_bh(&ar->data_lock);
2823 }
2824
2825 static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
2826 struct sk_buff_head *amsdu)
2827 {
2828 struct sk_buff *msdu;
2829 struct htt_rx_desc *rxd;
2830
2831 if (skb_queue_empty(list))
2832 return -ENOBUFS;
2833
2834 if (WARN_ON(!skb_queue_empty(amsdu)))
2835 return -EINVAL;
2836
2837 while ((msdu = __skb_dequeue(list))) {
2838 __skb_queue_tail(amsdu, msdu);
2839
2840 rxd = (void *)msdu->data - sizeof(*rxd);
2841 if (rxd->msdu_end.common.info0 &
2842 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
2843 break;
2844 }
2845
2846 msdu = skb_peek_tail(amsdu);
2847 rxd = (void *)msdu->data - sizeof(*rxd);
2848 if (!(rxd->msdu_end.common.info0 &
2849 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
2850 skb_queue_splice_init(amsdu, list);
2851 return -EAGAIN;
2852 }
2853
2854 return 0;
2855 }
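/* Editor's note on the contract above: a return of 0 means one complete
 * A-MSDU (ending in an MSDU with RX_MSDU_END_INFO0_LAST_MSDU set) was
 * moved from |list| to |amsdu|; -EAGAIN means the tail of |list| lacked
 * the last-MSDU marker, in which case the partial chain is spliced back
 * onto |list| untouched. The in-order handler below treats -EAGAIN as
 * fatal, since an indication should never be truncated mid-A-MSDU.
 */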
2856
2857 static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
2858 struct sk_buff *skb)
2859 {
2860 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2861
2862 if (!ieee80211_has_protected(hdr->frame_control))
2863 return;
2864
2865 /* Offloaded frames are already decrypted but firmware insists they are
2866 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
2867 * will drop the frame.
2868 */
2869
2870 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2871 status->flag |= RX_FLAG_DECRYPTED |
2872 RX_FLAG_IV_STRIPPED |
2873 RX_FLAG_MMIC_STRIPPED;
2874 }
2875
2876 static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
2877 struct sk_buff_head *list)
2878 {
2879 struct ath10k_htt *htt = &ar->htt;
2880 struct ieee80211_rx_status *status = &htt->rx_status;
2881 struct htt_rx_offload_msdu *rx;
2882 struct sk_buff *msdu;
2883 size_t offset;
2884
2885 while ((msdu = __skb_dequeue(list))) {
2886 /* Offloaded frames don't have an Rx descriptor. Instead they have
2887 * a short meta-information header.
2888 */
2889
2890 rx = (void *)msdu->data;
2891
2892 skb_put(msdu, sizeof(*rx));
2893 skb_pull(msdu, sizeof(*rx));
2894
2895 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
2896 ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
2897 dev_kfree_skb_any(msdu);
2898 continue;
2899 }
2900
2901 skb_put(msdu, __le16_to_cpu(rx->msdu_len));
2902
2903 /* Offloaded rx header length isn't a multiple of 2 or 4 so the
2904 * actual payload is unaligned. Align the frame. Otherwise
2905 * mac80211 complains. This shouldn't reduce performance much
2906 * because these offloaded frames are rare.
2907 */
2908 offset = 4 - ((unsigned long)msdu->data & 3);
2909 skb_put(msdu, offset);
2910 memmove(msdu->data + offset, msdu->data, msdu->len);
2911 skb_pull(msdu, offset);
2912
2913 /* FIXME: The frame is NWifi. Re-construct QoS Control
2914 * if possible later.
2915 */
2916
2917 memset(status, 0, sizeof(*status));
2918 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2919
2920 ath10k_htt_rx_h_rx_offload_prot(status, msdu);
2921 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
2922 ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
2923 }
2924 }
2925
2926 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
2927 {
2928 struct ath10k_htt *htt = &ar->htt;
2929 struct htt_resp *resp = (void *)skb->data;
2930 struct ieee80211_rx_status *status = &htt->rx_status;
2931 struct sk_buff_head list;
2932 struct sk_buff_head amsdu;
2933 u16 peer_id;
2934 u16 msdu_count;
2935 u8 vdev_id;
2936 u8 tid;
2937 bool offload;
2938 bool frag;
2939 int ret;
2940
2941 lockdep_assert_held(&htt->rx_ring.lock);
2942
2943 if (htt->rx_confused)
2944 return -EIO;
2945
2946 skb_pull(skb, sizeof(resp->hdr));
2947 skb_pull(skb, sizeof(resp->rx_in_ord_ind));
2948
2949 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
2950 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
2951 vdev_id = resp->rx_in_ord_ind.vdev_id;
2952 tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
2953 offload = !!(resp->rx_in_ord_ind.info &
2954 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
2955 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
2956
2957 ath10k_dbg(ar, ATH10K_DBG_HTT,
2958 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
2959 vdev_id, peer_id, tid, offload, frag, msdu_count);
2960
2961 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
2962 ath10k_warn(ar, "dropping invalid in order rx indication\n");
2963 return -EINVAL;
2964 }
2965
2966 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
2967 * extracted and processed.
2968 */
2969 __skb_queue_head_init(&list);
2970 if (ar->hw_params.target_64bit)
2971 ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
2972 &list);
2973 else
2974 ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
2975 &list);
2976
2977 if (ret < 0) {
2978 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
2979 htt->rx_confused = true;
2980 return -EIO;
2981 }
2982
2983 /* Offloaded frames are very different and need to be handled
2984 * separately.
2985 */
2986 if (offload)
2987 ath10k_htt_rx_h_rx_offload(ar, &list);
2988
2989 while (!skb_queue_empty(&list)) {
2990 __skb_queue_head_init(&amsdu);
2991 ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
2992 switch (ret) {
2993 case 0:
2994 /* Note: The in-order indication may report interleaved
2995 * frames from different PPDUs meaning reported rx rate
2996 * to mac80211 isn't accurate/reliable. It's still
2997 * better to report something than nothing though. This
2998 * should still give an idea about rx rate to the user.
2999 */
3000 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
3001 ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
3002 ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
3003 NULL);
3004 ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
3005 break;
3006 case -EAGAIN:
3007 /* fall through */
3008 default:
3009 /* Should not happen. */
3010 ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
3011 htt->rx_confused = true;
3012 __skb_queue_purge(&list);
3013 return -EIO;
3014 }
3015 }
3016 return ret;
3017 }
3018
3019 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
3020 const __le32 *resp_ids,
3021 int num_resp_ids)
3022 {
3023 int i;
3024 u32 resp_id;
3025
3026 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
3027 num_resp_ids);
3028
3029 for (i = 0; i < num_resp_ids; i++) {
3030 resp_id = le32_to_cpu(resp_ids[i]);
3031
3032 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
3033 resp_id);
3034
3035 /* TODO: free resp_id */
3036 }
3037 }
3038
3039 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
3040 {
3041 struct ieee80211_hw *hw = ar->hw;
3042 struct ieee80211_txq *txq;
3043 struct htt_resp *resp = (struct htt_resp *)skb->data;
3044 struct htt_tx_fetch_record *record;
3045 size_t len;
3046 size_t max_num_bytes;
3047 size_t max_num_msdus;
3048 size_t num_bytes;
3049 size_t num_msdus;
3050 const __le32 *resp_ids;
3051 u16 num_records;
3052 u16 num_resp_ids;
3053 u16 peer_id;
3054 u8 tid;
3055 int ret;
3056 int i;
3057 bool may_tx;
3058
3059 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
3060
3061 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
3062 if (unlikely(skb->len < len)) {
3063 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
3064 return;
3065 }
3066
3067 num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
3068 num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
3069
3070 len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
3071 len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
3072
3073 if (unlikely(skb->len < len)) {
3074 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
3075 return;
3076 }
3077
3078 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
3079 num_records, num_resp_ids,
3080 le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
3081
3082 if (!ar->htt.tx_q_state.enabled) {
3083 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
3084 return;
3085 }
3086
3087 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
3088 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
3089 return;
3090 }
3091
3092 rcu_read_lock();
3093
3094 for (i = 0; i < num_records; i++) {
3095 record = &resp->tx_fetch_ind.records[i];
3096 peer_id = MS(le16_to_cpu(record->info),
3097 HTT_TX_FETCH_RECORD_INFO_PEER_ID);
3098 tid = MS(le16_to_cpu(record->info),
3099 HTT_TX_FETCH_RECORD_INFO_TID);
3100 max_num_msdus = le16_to_cpu(record->num_msdus);
3101 max_num_bytes = le32_to_cpu(record->num_bytes);
3102
3103 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
3104 i, peer_id, tid, max_num_msdus, max_num_bytes);
3105
3106 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
3107 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
3108 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
3109 peer_id, tid);
3110 continue;
3111 }
3112
3113 spin_lock_bh(&ar->data_lock);
3114 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
3115 spin_unlock_bh(&ar->data_lock);
3116
3117 /* It is okay to release the lock and use txq because RCU read
3118 * lock is held.
3119 */
3120
3121 if (unlikely(!txq)) {
3122 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
3123 peer_id, tid);
3124 continue;
3125 }
3126
3127 num_msdus = 0;
3128 num_bytes = 0;
3129
3130 ieee80211_txq_schedule_start(hw, txq->ac);
3131 may_tx = ieee80211_txq_may_transmit(hw, txq);
3132 while (num_msdus < max_num_msdus &&
3133 num_bytes < max_num_bytes) {
3134 if (!may_tx)
3135 break;
3136
3137 ret = ath10k_mac_tx_push_txq(hw, txq);
3138 if (ret < 0)
3139 break;
3140
3141 num_msdus++;
3142 num_bytes += ret;
3143 }
3144 ieee80211_return_txq(hw, txq, false);
3145 ieee80211_txq_schedule_end(hw, txq->ac);
3146
3147 record->num_msdus = cpu_to_le16(num_msdus);
3148 record->num_bytes = cpu_to_le32(num_bytes);
3149
3150 ath10k_htt_tx_txq_recalc(hw, txq);
3151 }
3152
3153 rcu_read_unlock();
3154
3155 resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
3156 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
3157
3158 ret = ath10k_htt_tx_fetch_resp(ar,
3159 resp->tx_fetch_ind.token,
3160 resp->tx_fetch_ind.fetch_seq_num,
3161 resp->tx_fetch_ind.records,
3162 num_records);
3163 if (unlikely(ret)) {
3164 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
3165 le32_to_cpu(resp->tx_fetch_ind.token), ret);
3166 /* FIXME: request fw restart */
3167 }
3168
3169 ath10k_htt_tx_txq_sync(ar);
3170 }
3171
3172 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
3173 struct sk_buff *skb)
3174 {
3175 const struct htt_resp *resp = (void *)skb->data;
3176 size_t len;
3177 int num_resp_ids;
3178
3179 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
3180
3181 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
3182 if (unlikely(skb->len < len)) {
3183 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
3184 return;
3185 }
3186
3187 num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
3188 len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
3189
3190 if (unlikely(skb->len < len)) {
3191 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
3192 return;
3193 }
3194
3195 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
3196 resp->tx_fetch_confirm.resp_ids,
3197 num_resp_ids);
3198 }
3199
3200 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
3201 struct sk_buff *skb)
3202 {
3203 const struct htt_resp *resp = (void *)skb->data;
3204 const struct htt_tx_mode_switch_record *record;
3205 struct ieee80211_txq *txq;
3206 struct ath10k_txq *artxq;
3207 size_t len;
3208 size_t num_records;
3209 enum htt_tx_mode_switch_mode mode;
3210 bool enable;
3211 u16 info0;
3212 u16 info1;
3213 u16 threshold;
3214 u16 peer_id;
3215 u8 tid;
3216 int i;
3217
3218 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
3219
3220 len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
3221 if (unlikely(skb->len < len)) {
3222 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
3223 return;
3224 }
3225
3226 info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
3227 info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
3228
3229 enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
3230 num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
3231 mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
3232 threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
3233
3234 ath10k_dbg(ar, ATH10K_DBG_HTT,
3235 "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
3236 info0, info1, enable, num_records, mode, threshold);
3237
3238 len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
3239
3240 if (unlikely(skb->len < len)) {
3241 ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
3242 return;
3243 }
3244
3245 switch (mode) {
3246 case HTT_TX_MODE_SWITCH_PUSH:
3247 case HTT_TX_MODE_SWITCH_PUSH_PULL:
3248 break;
3249 default:
3250 ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
3251 mode);
3252 return;
3253 }
3254
3255 if (!enable)
3256 return;
3257
3258 ar->htt.tx_q_state.enabled = enable;
3259 ar->htt.tx_q_state.mode = mode;
3260 ar->htt.tx_q_state.num_push_allowed = threshold;
3261
3262 rcu_read_lock();
3263
3264 for (i = 0; i < num_records; i++) {
3265 record = &resp->tx_mode_switch_ind.records[i];
3266 info0 = le16_to_cpu(record->info0);
3267 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
3268 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
3269
3270 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
3271 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
3272 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
3273 peer_id, tid);
3274 continue;
3275 }
3276
3277 spin_lock_bh(&ar->data_lock);
3278 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
3279 spin_unlock_bh(&ar->data_lock);
3280
3281 /* It is okay to release the lock and use txq because RCU read
3282 * lock is held.
3283 */
3284
3285 if (unlikely(!txq)) {
3286 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
3287 peer_id, tid);
3288 continue;
3289 }
3290
3291 spin_lock_bh(&ar->htt.tx_lock);
3292 artxq = (void *)txq->drv_priv;
3293 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
3294 spin_unlock_bh(&ar->htt.tx_lock);
3295 }
3296
3297 rcu_read_unlock();
3298
3299 ath10k_mac_tx_push_pending(ar);
3300 }
3301
3302 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
3303 {
3304 bool release;
3305
3306 release = ath10k_htt_t2h_msg_handler(ar, skb);
3307
3308 /* Free the indication buffer */
3309 if (release)
3310 dev_kfree_skb_any(skb);
3311 }
3312
3313 static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
3314 {
3315 static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
3316 18, 24, 36, 48, 54};
3317 int i;
3318
3319 for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
3320 if (rate == legacy_rates[i])
3321 return i;
3322 }
3323
3324 ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate);
3325 return -EINVAL;
3326 }
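/* Editor's example: legacy_rates[] mixes CCK (1, 2, 5.5, 11 Mbps at
 * indices 0-3, where 5 denotes 5.5) and OFDM (6..54 Mbps at indices
 * 4-11), so e.g. ath10k_get_legacy_rate_idx(ar, 36) == 9. The CCK
 * 5.5 Mbps rate reported by firmware as 6 is remapped to 5 by the caller
 * before the lookup.
 */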
3327
3328 static void
3329 ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
3330 struct ath10k_sta *arsta,
3331 struct ath10k_per_peer_tx_stats *pstats,
3332 s8 legacy_rate_idx)
3333 {
3334 struct rate_info *txrate = &arsta->txrate;
3335 struct ath10k_htt_tx_stats *tx_stats;
3336 int idx, ht_idx, gi, mcs, bw, nss;
3337 unsigned long flags;
3338
3339 if (!arsta->tx_stats)
3340 return;
3341
3342 tx_stats = arsta->tx_stats;
3343 flags = txrate->flags;
3344 gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
3345 mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
3346 bw = txrate->bw;
3347 nss = txrate->nss;
3348 ht_idx = mcs + (nss - 1) * 8;
3349 idx = mcs * 8 + 8 * 10 * (nss - 1);
3350 idx += bw * 2 + gi;
3351
3352 #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
3353
3354 if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
3355 STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
3356 STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
3357 STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
3358 STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
3359 STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
3360 STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
3361 } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
3362 STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
3363 STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
3364 STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
3365 STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
3366 STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
3367 STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
3368 } else {
3369 mcs = legacy_rate_idx;
3370
3371 STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
3372 STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
3373 STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
3374 STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
3375 STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
3376 STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
3377 }
3378
3379 if (ATH10K_HW_AMPDU(pstats->flags)) {
3380 tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
3381
3382 if (txrate->flags & RATE_INFO_FLAGS_MCS) {
3383 STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
3384 pstats->succ_bytes + pstats->retry_bytes;
3385 STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
3386 pstats->succ_pkts + pstats->retry_pkts;
3387 } else {
3388 STATS_OP_FMT(AMPDU).vht[0][mcs] +=
3389 pstats->succ_bytes + pstats->retry_bytes;
3390 STATS_OP_FMT(AMPDU).vht[1][mcs] +=
3391 pstats->succ_pkts + pstats->retry_pkts;
3392 }
3393 STATS_OP_FMT(AMPDU).bw[0][bw] +=
3394 pstats->succ_bytes + pstats->retry_bytes;
3395 STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
3396 pstats->succ_bytes + pstats->retry_bytes;
3397 STATS_OP_FMT(AMPDU).gi[0][gi] +=
3398 pstats->succ_bytes + pstats->retry_bytes;
3399 STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
3400 pstats->succ_bytes + pstats->retry_bytes;
3401 STATS_OP_FMT(AMPDU).bw[1][bw] +=
3402 pstats->succ_pkts + pstats->retry_pkts;
3403 STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
3404 pstats->succ_pkts + pstats->retry_pkts;
3405 STATS_OP_FMT(AMPDU).gi[1][gi] +=
3406 pstats->succ_pkts + pstats->retry_pkts;
3407 STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
3408 pstats->succ_pkts + pstats->retry_pkts;
3409 } else {
3410 tx_stats->ack_fails +=
3411 ATH10K_HW_BA_FAIL(pstats->flags);
3412 }
3413
3414 STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
3415 STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
3416 STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
3417
3418 STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
3419 STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
3420 STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
3421
3422 STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
3423 STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
3424 STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
3425
3426 STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
3427 STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
3428 STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
3429
3430 STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
3431 STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
3432 STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
3433
3434 STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
3435 STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
3436 STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
3437
3438 if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
3439 STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
3440 STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
3441 STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
3442 STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
3443 STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
3444 STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
3445 }
3446
3447 tx_stats->tx_duration += pstats->duration;
3448 }
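/* Editor's worked example for the rate_table index used above: with
 * mcs = 7, nss = 2, bw = 2 (80 MHz) and short GI (gi = 1):
 *
 *   idx = 7 * 8 + 8 * 10 * (2 - 1) + 2 * 2 + 1 = 141
 *
 * i.e. 80 slots per extra spatial stream, 8 per MCS, 2 per bandwidth
 * step and 1 for the GI variant.
 */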
3449
3450 static void
3451 ath10k_update_per_peer_tx_stats(struct ath10k *ar,
3452 struct ieee80211_sta *sta,
3453 struct ath10k_per_peer_tx_stats *peer_stats)
3454 {
3455 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
3456 struct ieee80211_chanctx_conf *conf = NULL;
3457 u8 rate = 0, sgi;
3458 s8 rate_idx = 0;
3459 bool skip_auto_rate;
3460 struct rate_info txrate;
3461
3462 lockdep_assert_held(&ar->data_lock);
3463
3464 txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
3465 txrate.bw = ATH10K_HW_BW(peer_stats->flags);
3466 txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
3467 txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
3468 sgi = ATH10K_HW_GI(peer_stats->flags);
3469 skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);
3470
3471 /* Firmware's rate control skips broadcast/management frames,
3472 * frames sent at host-configured fixed rates, and some other special cases.
3473 */
3474 if (skip_auto_rate)
3475 return;
3476
3477 if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
3478 ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
3479 return;
3480 }
3481
3482 if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
3483 (txrate.mcs > 7 || txrate.nss < 1)) {
3484 ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
3485 txrate.mcs, txrate.nss);
3486 return;
3487 }
3488
3489 memset(&arsta->txrate, 0, sizeof(arsta->txrate));
3490 memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
3491 if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
3492 txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
3493 rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
3494 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
3495 if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
3496 rate = 5;
3497 rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
3498 if (rate_idx < 0)
3499 return;
3500 arsta->txrate.legacy = rate;
3501 } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
3502 arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
3503 arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
3504 } else {
3505 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
3506 arsta->txrate.mcs = txrate.mcs;
3507 }
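/* Worked example of the HT folding above (values illustrative): a
 * report of mcs = 7, nss = 2 yields txrate.mcs + 8 * (txrate.nss - 1)
 * = 15, the flat mac80211 HT MCS index. VHT reports keep mcs and nss
 * as separate fields, so no folding is applied there.
 */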
3508
3509 switch (txrate.flags) {
3510 case WMI_RATE_PREAMBLE_OFDM:
3511 if (arsta->arvif && arsta->arvif->vif)
3512 conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
3513 if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
3514 arsta->tx_info.status.rates[0].idx = rate_idx - 4;
3515 break;
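/* The "- 4" above compensates for the legacy rate table beginning
 * with four CCK entries (1, 2, 5.5, 11 Mbps) on 2.4 GHz: those do
 * not exist on 5 GHz, so e.g. legacy index 4 (6 Mbps OFDM) becomes
 * rates[0].idx = 0 there.
 */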
3516 case WMI_RATE_PREAMBLE_CCK:
3517 arsta->tx_info.status.rates[0].idx = rate_idx;
3518 if (sgi)
3519 arsta->tx_info.status.rates[0].flags |=
3520 (IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
3521 IEEE80211_TX_RC_SHORT_GI);
3522 break;
3523 case WMI_RATE_PREAMBLE_HT:
3524 arsta->tx_info.status.rates[0].idx =
3525 txrate.mcs + ((txrate.nss - 1) * 8);
3526 if (sgi)
3527 arsta->tx_info.status.rates[0].flags |=
3528 IEEE80211_TX_RC_SHORT_GI;
3529 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
3530 break;
3531 case WMI_RATE_PREAMBLE_VHT:
3532 ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
3533 txrate.mcs, txrate.nss);
3534 if (sgi)
3535 arsta->tx_info.status.rates[0].flags |=
3536 IEEE80211_TX_RC_SHORT_GI;
3537 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
3538 break;
3539 }
3540
3541 arsta->txrate.nss = txrate.nss;
3542 arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
3543 arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
3544 if (sgi)
3545 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
3546
3547 switch (arsta->txrate.bw) {
3548 case RATE_INFO_BW_40:
3549 arsta->tx_info.status.rates[0].flags |=
3550 IEEE80211_TX_RC_40_MHZ_WIDTH;
3551 break;
3552 case RATE_INFO_BW_80:
3553 arsta->tx_info.status.rates[0].flags |=
3554 IEEE80211_TX_RC_80_MHZ_WIDTH;
3555 break;
3556 }
3557
3558 if (peer_stats->succ_pkts) {
3559 arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
3560 arsta->tx_info.status.rates[0].count = 1;
3561 ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
3562 }
3563
3564 if (ath10k_debug_is_extd_tx_stats_enabled(ar))
3565 ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
3566 rate_idx);
3567 }
3568
3569 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
3570 struct sk_buff *skb)
3571 {
3572 struct htt_resp *resp = (struct htt_resp *)skb->data;
3573 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
3574 struct htt_per_peer_tx_stats_ind *tx_stats;
3575 struct ieee80211_sta *sta;
3576 struct ath10k_peer *peer;
3577 int peer_id, i;
3578 u8 ppdu_len, num_ppdu;
3579
3580 num_ppdu = resp->peer_tx_stats.num_ppdu;
3581 ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
3582
3583 if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
3584 ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
3585 return;
3586 }
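/* A worked example of the bounds check above (numbers assumed):
 * ppdu_len = 10 in the indication means 10 * sizeof(__le32) = 40
 * bytes per PPDU record, so with num_ppdu = 2 the skb must hold the
 * HTT response header plus at least 80 bytes of payload, otherwise
 * the whole indication is dropped as truncated.
 */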
3587
3588 tx_stats = (struct htt_per_peer_tx_stats_ind *)
3589 (resp->peer_tx_stats.payload);
3590 peer_id = __le16_to_cpu(tx_stats->peer_id);
3591
3592 rcu_read_lock();
3593 spin_lock_bh(&ar->data_lock);
3594 peer = ath10k_peer_find_by_id(ar, peer_id);
3595 if (!peer || !peer->sta) {
3596 ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
3597 peer_id);
3598 goto out;
3599 }
3600
3601 sta = peer->sta;
3602 for (i = 0; i < num_ppdu; i++) {
3603 tx_stats = (struct htt_per_peer_tx_stats_ind *)
3604 (resp->peer_tx_stats.payload + i * ppdu_len);
3605
3606 p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
3607 p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
3608 p_tx_stats->failed_bytes =
3609 __le32_to_cpu(tx_stats->failed_bytes);
3610 p_tx_stats->ratecode = tx_stats->ratecode;
3611 p_tx_stats->flags = tx_stats->flags;
3612 p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
3613 p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
3614 p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
3615 p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);
3616
3617 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
3618 }
3619
3620 out:
3621 spin_unlock_bh(&ar->data_lock);
3622 rcu_read_unlock();
3623 }
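/* Locking note for the stats fetchers: rcu_read_lock() keeps
 * peer->sta alive across the loop, while ar->data_lock serializes the
 * peer lookup and the accumulation in
 * ath10k_update_per_peer_tx_stats(), which lockdep-asserts that lock.
 */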
3624
3625 static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
3626 {
3627 struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
3628 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
3629 struct ath10k_10_2_peer_tx_stats *tx_stats;
3630 struct ieee80211_sta *sta;
3631 struct ath10k_peer *peer;
3632 u16 log_type = __le16_to_cpu(hdr->log_type);
3633 u32 peer_id = 0, i;
3634
3635 if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
3636 return;
3637
3638 tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
3639 ATH10K_10_2_TX_STATS_OFFSET);
3640
3641 if (!tx_stats->tx_ppdu_cnt)
3642 return;
3643
3644 peer_id = tx_stats->peer_id;
3645
3646 rcu_read_lock();
3647 spin_lock_bh(&ar->data_lock);
3648 peer = ath10k_peer_find_by_id(ar, peer_id);
3649 if (!peer || !peer->sta) {
3650 ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
3651 peer_id);
3652 goto out;
3653 }
3654
3655 sta = peer->sta;
3656 for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
3657 p_tx_stats->succ_bytes =
3658 __le16_to_cpu(tx_stats->success_bytes[i]);
3659 p_tx_stats->retry_bytes =
3660 __le16_to_cpu(tx_stats->retry_bytes[i]);
3661 p_tx_stats->failed_bytes =
3662 __le16_to_cpu(tx_stats->failed_bytes[i]);
3663 p_tx_stats->ratecode = tx_stats->ratecode[i];
3664 p_tx_stats->flags = tx_stats->flags[i];
3665 p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
3666 p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
3667 p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
3668
3669 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
3670 }
3671 spin_unlock_bh(&ar->data_lock);
3672 rcu_read_unlock();
3673
3674 return;
3675
3676 out:
3677 spin_unlock_bh(&ar->data_lock);
3678 rcu_read_unlock();
3679 }
3680
3681 static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
3682 {
3683 switch (sec_type) {
3684 case HTT_SECURITY_TKIP:
3685 case HTT_SECURITY_TKIP_NOMIC:
3686 case HTT_SECURITY_AES_CCMP:
3687 return 48;
3688 default:
3689 return 0;
3690 }
3691 }
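/* TKIP and CCMP both carry a 48-bit packet number (TSC/PN) per
 * IEEE 802.11, hence the 48 above; security types without a PN get 0
 * and are exempt from the PN replay check on rx.
 */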
3692
3693 static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
3694 struct htt_security_indication *ev)
3695 {
3696 enum htt_txrx_sec_cast_type sec_index;
3697 enum htt_security_types sec_type;
3698 struct ath10k_peer *peer;
3699
3700 spin_lock_bh(&ar->data_lock);
3701
3702 peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
3703 if (!peer) {
3704 ath10k_warn(ar, "failed to find peer id %d for security indication",
3705 __le16_to_cpu(ev->peer_id));
3706 goto out;
3707 }
3708
3709 sec_type = MS(ev->flags, HTT_SECURITY_TYPE);
3710
3711 if (ev->flags & HTT_SECURITY_IS_UNICAST)
3712 sec_index = HTT_TXRX_SEC_UCAST;
3713 else
3714 sec_index = HTT_TXRX_SEC_MCAST;
3715
3716 peer->rx_pn[sec_index].sec_type = sec_type;
3717 peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);
3718
3719 memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
3720 memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));
3721
3722 out:
3723 spin_unlock_bh(&ar->data_lock);
3724 }
3725
3726 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
3727 {
3728 struct ath10k_htt *htt = &ar->htt;
3729 struct htt_resp *resp = (struct htt_resp *)skb->data;
3730 enum htt_t2h_msg_type type;
3731
3732 /* confirm alignment */
3733 if (!IS_ALIGNED((unsigned long)skb->data, 4))
3734 ath10k_warn(ar, "unaligned htt message, expect trouble\n");
3735
3736 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%02X\n",
3737 resp->hdr.msg_type);
3738
3739 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
3740 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%02X max: 0x%02X\n",
3741 resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
3742 return true;
3743 }
3744 type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
3745
3746 switch (type) {
3747 case HTT_T2H_MSG_TYPE_VERSION_CONF: {
3748 htt->target_version_major = resp->ver_resp.major;
3749 htt->target_version_minor = resp->ver_resp.minor;
3750 complete(&htt->target_version_received);
3751 break;
3752 }
3753 case HTT_T2H_MSG_TYPE_RX_IND:
3754 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
3755 return ath10k_htt_rx_proc_rx_ind_hl(htt,
3756 &resp->rx_ind_hl,
3757 skb,
3758 HTT_RX_PN_CHECK,
3759 HTT_RX_NON_TKIP_MIC);
3760 else
3761 ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
3762 break;
3763 case HTT_T2H_MSG_TYPE_PEER_MAP: {
3764 struct htt_peer_map_event ev = {
3765 .vdev_id = resp->peer_map.vdev_id,
3766 .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
3767 };
3768 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
3769 ath10k_peer_map_event(htt, &ev);
3770 break;
3771 }
3772 case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
3773 struct htt_peer_unmap_event ev = {
3774 .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
3775 };
3776 ath10k_peer_unmap_event(htt, &ev);
3777 break;
3778 }
3779 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
3780 struct htt_tx_done tx_done = {};
3781 int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
3782 int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
3783
3784 tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
3785
3786 switch (status) {
3787 case HTT_MGMT_TX_STATUS_OK:
3788 tx_done.status = HTT_TX_COMPL_STATE_ACK;
3789 if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
3790 ar->wmi.svc_map) &&
3791 (resp->mgmt_tx_completion.flags &
3792 HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
3793 tx_done.ack_rssi =
3794 FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
3795 info);
3796 }
3797 break;
3798 case HTT_MGMT_TX_STATUS_RETRY:
3799 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
3800 break;
3801 case HTT_MGMT_TX_STATUS_DROP:
3802 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
3803 break;
3804 }
3805
3806 status = ath10k_txrx_tx_unref(htt, &tx_done);
3807 if (!status) {
3808 spin_lock_bh(&htt->tx_lock);
3809 ath10k_htt_tx_mgmt_dec_pending(htt);
3810 spin_unlock_bh(&htt->tx_lock);
3811 }
3812 break;
3813 }
3814 case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
3815 ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
3816 break;
3817 case HTT_T2H_MSG_TYPE_SEC_IND: {
3818 struct ath10k *ar = htt->ar;
3819 struct htt_security_indication *ev = &resp->security_indication;
3820
3821 ath10k_htt_rx_sec_ind_handler(ar, ev);
3822 ath10k_dbg(ar, ATH10K_DBG_HTT,
3823 "sec ind peer_id %d unicast %d type %d\n",
3824 __le16_to_cpu(ev->peer_id),
3825 !!(ev->flags & HTT_SECURITY_IS_UNICAST),
3826 MS(ev->flags, HTT_SECURITY_TYPE));
3827 complete(&ar->install_key_done);
3828 break;
3829 }
3830 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
3831 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
3832 skb->data, skb->len);
3833 atomic_inc(&htt->num_mpdus_ready);
3834
3835 return ath10k_htt_rx_proc_rx_frag_ind(htt,
3836 &resp->rx_frag_ind,
3837 skb);
3839 }
3840 case HTT_T2H_MSG_TYPE_TEST:
3841 break;
3842 case HTT_T2H_MSG_TYPE_STATS_CONF:
3843 trace_ath10k_htt_stats(ar, skb->data, skb->len);
3844 break;
3845 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
3846 /* Firmware can return tx frames if it's unable to fully
3847 * process them and suspects the host may be able to fix it. ath10k
3848 * sends all tx frames as already inspected so this shouldn't
3849 * happen unless fw has a bug.
3850 */
3851 ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
3852 break;
3853 case HTT_T2H_MSG_TYPE_RX_ADDBA:
3854 ath10k_htt_rx_addba(ar, resp);
3855 break;
3856 case HTT_T2H_MSG_TYPE_RX_DELBA:
3857 ath10k_htt_rx_delba(ar, resp);
3858 break;
3859 case HTT_T2H_MSG_TYPE_PKTLOG: {
3860 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
3861 skb->len -
3862 offsetof(struct htt_resp,
3863 pktlog_msg.payload));
3864
3865 if (ath10k_peer_stats_enabled(ar))
3866 ath10k_fetch_10_2_tx_stats(ar,
3867 resp->pktlog_msg.payload);
3868 break;
3869 }
3870 case HTT_T2H_MSG_TYPE_RX_FLUSH: {
3871 /* Ignore this event because mac80211 takes care of Rx
3872 * aggregation reordering.
3873 */
3874 break;
3875 }
3876 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
3877 skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
3878 return false;
3879 }
3880 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
3881 break;
3882 case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
3883 u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
3884 u32 freq = __le32_to_cpu(resp->chan_change.freq);
3885
3886 ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
3887 ath10k_dbg(ar, ATH10K_DBG_HTT,
3888 "htt chan change freq %u phymode %s\n",
3889 freq, ath10k_wmi_phymode_str(phymode));
3890 break;
3891 }
3892 case HTT_T2H_MSG_TYPE_AGGR_CONF:
3893 break;
3894 case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
3895 struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
3896
3897 if (!tx_fetch_ind) {
3898 ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
3899 break;
3900 }
3901 skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
3902 break;
3903 }
3904 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
3905 ath10k_htt_rx_tx_fetch_confirm(ar, skb);
3906 break;
3907 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
3908 ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
3909 break;
3910 case HTT_T2H_MSG_TYPE_PEER_STATS:
3911 ath10k_htt_fetch_peer_stats(ar, skb);
3912 break;
3913 case HTT_T2H_MSG_TYPE_EN_STATS:
3914 default:
3915 ath10k_warn(ar, "htt event (%d) not handled\n",
3916 resp->hdr.msg_type);
3917 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
3918 skb->data, skb->len);
3919 break;
3920 }
3921 return true;
3922 }
3923 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
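/* Return-value contract of ath10k_htt_t2h_msg_handler(), as inferred
 * from the cases above: true means the skb is still owned by the
 * caller and may be freed; false (e.g. the RX_IN_ORD_PADDR_IND case,
 * which queues the skb on rx_in_ord_compl_q for later processing)
 * means ownership moved and the caller must not touch the buffer.
 * A minimal caller sketch honoring that contract (hypothetical, the
 * real transport glue differs per bus):
 *
 *	if (ath10k_htt_t2h_msg_handler(ar, skb))
 *		dev_kfree_skb_any(skb);
 */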
3924
3925 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
3926 struct sk_buff *skb)
3927 {
3928 trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
3929 dev_kfree_skb_any(skb);
3930 }
3931 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
3932
3933 static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
3934 {
3935 struct sk_buff *skb;
3936
3937 while (quota < budget) {
3938 if (skb_queue_empty(&ar->htt.rx_msdus_q))
3939 break;
3940
3941 skb = skb_dequeue(&ar->htt.rx_msdus_q);
3942 if (!skb)
3943 break;
3944 ath10k_process_rx(ar, skb);
3945 quota++;
3946 }
3947
3948 return quota;
3949 }
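/* Example of the quota accounting above (numbers illustrative): with
 * budget = 64 and quota = 10 already spent, at most 54 queued MSDUs
 * are handed to mac80211 in one call; the updated quota is returned
 * so the poll loop below can tell how much budget remains.
 */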
3950
3951 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
3952 {
3953 struct ath10k_htt *htt = &ar->htt;
3954 struct htt_tx_done tx_done = {};
3955 struct sk_buff_head tx_ind_q;
3956 struct sk_buff *skb;
3957 unsigned long flags;
3958 int quota = 0, done, ret;
3959 bool resched_napi = false;
3960
3961 __skb_queue_head_init(&tx_ind_q);
3962
3963 /* Process pending frames before dequeuing more data
3964 * from hardware.
3965 */
3966 quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
3967 if (quota == budget) {
3968 resched_napi = true;
3969 goto exit;
3970 }
3971
3972 while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
3973 spin_lock_bh(&htt->rx_ring.lock);
3974 ret = ath10k_htt_rx_in_ord_ind(ar, skb);
3975 spin_unlock_bh(&htt->rx_ring.lock);
3976
3977 dev_kfree_skb_any(skb);
3978 if (ret == -EIO) {
3979 resched_napi = true;
3980 goto exit;
3981 }
3982 }
3983
3984 while (atomic_read(&htt->num_mpdus_ready)) {
3985 ret = ath10k_htt_rx_handle_amsdu(htt);
3986 if (ret == -EIO) {
3987 resched_napi = true;
3988 goto exit;
3989 }
3990 atomic_dec(&htt->num_mpdus_ready);
3991 }
3992
3993 /* Deliver received data after processing data from hardware */
3994 quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
3995
3996 /* From NAPI documentation:
3997 * The napi poll() function may also process TX completions, in which
3998 * case if it processes the entire TX ring then it should count that
3999 * work as the rest of the budget.
4000 */
4001 if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
4002 quota = budget;
4003
4004 /* kfifo_get: called only from this NAPI poll function, so reads are
4005 * neatly serialized. From kfifo_get() documentation:
4006 * Note that with only one concurrent reader and one concurrent writer,
4007 * you don't need extra locking to use these macros.
4008 */
4009 while (kfifo_get(&htt->txdone_fifo, &tx_done))
4010 ath10k_txrx_tx_unref(htt, &tx_done);
4011
4012 ath10k_mac_tx_push_pending(ar);
4013
4014 spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
4015 skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
4016 spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
4017
4018 while ((skb = __skb_dequeue(&tx_ind_q))) {
4019 ath10k_htt_rx_tx_fetch_ind(ar, skb);
4020 dev_kfree_skb_any(skb);
4021 }
4022
4023 exit:
4024 ath10k_htt_rx_msdu_buff_replenish(htt);
4025 /* In case of rx failure or more data to read, report the full
4026 * budget so the NAPI poll gets rescheduled
4027 */
4028 done = resched_napi ? budget : quota;
4029
4030 return done;
4031 }
4032 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
4033
4034 static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
4035 .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
4036 .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
4037 .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
4038 .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
4039 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
4040 };
4041
4042 static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
4043 .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
4044 .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
4045 .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
4046 .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
4047 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
4048 };
4049
4050 static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
4051 .htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
4052 };
4053
4054 void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
4055 {
4056 struct ath10k *ar = htt->ar;
4057
4058 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
4059 htt->rx_ops = &htt_rx_ops_hl;
4060 else if (ar->hw_params.target_64bit)
4061 htt->rx_ops = &htt_rx_ops_64;
4062 else
4063 htt->rx_ops = &htt_rx_ops_32;
4064 }
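/* The rx_ops chosen above track the target's addressing model: the
 * _32 and _64 variants differ only in the width of the DMA addresses
 * written to the paddrs ring (__le32 vs __le64), while high-latency
 * (HL) targets manage no rx paddr ring at all and only supply the
 * fragment-indication handler.
 */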
4065