// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"

static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath11k_peer_rx_tid_cleanup(ar, peer);
	crypto_free_shash(peer->tfm_mmic);
	spin_unlock_bh(&ab->base_lock);
}

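/* Set up the datapath state for a newly created peer: program the
 * default REO routing for its RX traffic, then create its RX TID
 * queues and fragment reassembly context. The routing value packs
 * the hash-enable flag in bit 0 and the (1-based) REO destination
 * ring in the bits above it.
 */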
int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));
	if (ret) {
		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
			    ret, addr, vdev_id);
		return ret;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
					       HAL_PN_TYPE_NONE);
		if (ret) {
			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx defrag context\n");
		return ret;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath11k_peer_rx_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
			  ring->paddr_unaligned);

	ring->vaddr_unaligned = NULL;
}

static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}

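/* Map a HAL ring (type, number) to the external interrupt group that
 * services it, by scanning the per-group mask for that ring class in
 * hw_params. Ring types that are not serviced through an ext group
 * (e.g. the CE rings) return -ENOENT so no MSI gets attached to them.
 */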
static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const u8 *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			grp_mask = &ab->hw_params.ring_mask->tx[0];
		} else if (ring_num == 3) {
			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			return -ENOENT;
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		return -ENOENT;
	}

	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

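/* Fill in the MSI address/data for a ring. If the bus reserved no MSI
 * vectors for the DP, or the ring is not part of any ext interrupt
 * group, msi_addr/msi_data are left at zero and the ring raises no
 * MSI. Groups beyond the vector count share vectors through the
 * modulo in the msi_data computation below.
 */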
static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "DP",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
							      ring_num);
	if (msi_group_number < 0) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "ring not part of an ext_group; ring_type: %d, ring_num %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
				+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

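/* Allocate and initialize one SRNG. The backing buffer is
 * over-allocated by HAL_RING_BASE_ALIGN - 1 bytes so the ring base can
 * be aligned with PTR_ALIGN() instead of a separate aligned allocator;
 * interrupt mitigation thresholds are then chosen per ring class.
 */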
int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
	int ret;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
	ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
						   &ring->paddr_unaligned,
						   GFP_KERNEL);
	if (!ring->vaddr_unaligned)
		return -ENOMEM;

	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* follow through when ring_num >= 3 */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
		return -EINVAL;
	}

	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}

static void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);

	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
}

static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_stop_shadow_timers(ab);
	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
	}
	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}

static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int i, ret;
	u32 ring_hash_map;

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
				   DP_TCL_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
				   0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
		goto err;
	}

	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, i, 0,
					   DP_TCL_DATA_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, i, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		ath11k_hal_tx_init_data_ring(ab, srng);

		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
					    dp->tx_ring[i].tcl_data_ring.ring_id);
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   3, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath11k_hal_reo_init_cmd_ring(ab, srng);

	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
				    dp->reo_cmd_ring.ring_id);

	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
		goto err;
	}

	/* When hash based routing of rx packets is enabled, 32 entries
	 * mapping the hash values to the rings are configured. Each hash
	 * entry uses three bits to map to a particular ring. The ring
	 * mapping is 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW
	 * and 7:Not used.
	 */
	ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
			HAL_HASH_ROUTING_RING_SW2 << 3 |
			HAL_HASH_ROUTING_RING_SW3 << 6 |
			HAL_HASH_ROUTING_RING_SW4 << 9 |
			HAL_HASH_ROUTING_RING_SW1 << 12 |
			HAL_HASH_ROUTING_RING_SW2 << 15 |
			HAL_HASH_ROUTING_RING_SW3 << 18 |
			HAL_HASH_ROUTING_RING_SW4 << 21;

	ath11k_hal_reo_hw_setup(ab, ring_hash_map);

	return 0;

err:
	ath11k_dp_srng_common_cleanup(ab);

	return ret;
}

static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}

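/* When the total link descriptor memory exceeds the single-allocation
 * threshold, the idle list is handed to the hardware as a scatter
 * list instead: a set of DMA-coherent buffers, each filled with link
 * descriptor addresses and registered through
 * ath11k_hal_setup_link_idle_list().
 */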
static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
			    ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		     sizeof(struct hal_wbm_link_desc);
	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath11k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}

static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}

static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
					dma_alloc_coherent(ab->dev, desc_sz,
							   &desc_bank[i].paddr_unaligned,
							   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}

void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath11k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath11k_dp_srng_cleanup(ab, ring);
		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}

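/* Size the WBM idle link ring from worst-case averages: MPDU link and
 * MPDU queue descriptors plus TX and RX MSDU link descriptors. The
 * total is rounded up to the next power of two (via fls()) before the
 * ring is created.
 */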
static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
	struct ath11k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}
	return ret;
}

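/* Allocate the link descriptor banks and publish them to the hardware,
 * either directly through the WBM idle link ring or, for totals above
 * the allocation threshold, through the scatter list set up above.
 */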
int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	u32 paddr;
	u32 *desc;
	int i, ret;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				    HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup scatter desc list when the total memory requirement is more */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath11k_warn(ab, "failed to setup scattering idle list descriptor :%d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (link_desc_banks[i].size - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
						      i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}

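/* NAPI poll handler for one ext interrupt group: drain each ring class
 * this group is responsible for, charging RX work against the NAPI
 * budget and bailing out as soon as the budget is exhausted. Returns
 * the total amount of work done.
 */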
int ath11k_dp_service_srng(struct ath11k_base *ab,
			   struct ath11k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i = 0, j;
	int tot_work_done = 0;

	while (ab->hw_params.ring_mask->tx[grp_id] >> i) {
		if (ab->hw_params.ring_mask->tx[grp_id] & BIT(i))
			ath11k_dp_tx_completion_handler(ab, i);
		i++;
	}

	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
		work_done = ath11k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx[grp_id]) {
		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
		work_done = ath11k_dp_process_rx(ab, i, napi,
						 budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
				int id = i * ab->hw_params.num_rxmda_per_pdev + j;

				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
				    BIT(id)) {
					work_done =
					ath11k_dp_rx_process_mon_rings(ab,
								       id,
								       napi, budget);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params.ring_mask->reo_status[grp_id])
		ath11k_dp_process_reo_status(ab);

	for (i = 0; i < ab->num_radios; i++) {
		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * ab->hw_params.num_rxmda_per_pdev + j;

			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
				budget -= work_done;
				tot_work_done += work_done;
			}

			if (budget <= 0)
				goto done;

			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
				struct ath11k_pdev_dp *dp = &ar->dp;
				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
							   HAL_RX_BUF_RBM_SW3_BM);
			}
		}
	}
	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}
EXPORT_SYMBOL(ath11k_dp_service_srng);

void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int i;

	del_timer_sync(&ab->mon_reap_timer);

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ath11k_dp_rx_pdev_free(ab, i);
		ath11k_debugfs_unregister(ar);
		ath11k_dp_rx_pdev_mon_detach(ar);
	}
}

void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int i;
	int j;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		dp->mac_id = i;
		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);
		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
		}
		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
	}
}

int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int ret;
	int i;

	/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath11k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
				    i);
			goto err;
		}
		ret = ath11k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
				    i);
			goto err;
		}
	}

	return 0;

err:
	ath11k_dp_pdev_free(ab);

	return ret;
}

int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
	struct ath11k_htc_svc_conn_req conn_req;
	struct ath11k_htc_svc_conn_resp conn_resp;
	int status;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);
	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}

static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
	/* When v2_map_support is true: for STA mode, enable the address
	 * search index; TCL uses the ast_hash value in the descriptor.
	 * When v2_map_support is false: for STA mode, don't enable the
	 * address search index.
	 */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
		} else {
			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		}
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}

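/* Precompute the TCL metadata used in this vif's TX descriptors:
 * descriptor type, vdev id and pdev id are packed once at attach time,
 * with the HTT extension bit cleared by default.
 */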
void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
					  arvif->vdev_id) |
			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
					  ar->pdev->pdev_id);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath11k_dp_update_vdev_search(arvif);
}

static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
	struct ath11k_base *ab = (struct ath11k_base *)ctx;
	struct sk_buff *msdu = skb;

	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb_any(msdu);

	return 0;
}

void ath11k_dp_free(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath11k_dp_srng_common_cleanup(ab);

	ath11k_dp_reo_cmd_list_cleanup(ab);

	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
		idr_for_each(&dp->tx_ring[i].txbuf_idr,
			     ath11k_dp_tx_pending_cleanup, ab);
		idr_destroy(&dp->tx_ring[i].txbuf_idr);
		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
		kfree(dp->tx_ring[i].tx_status);
	}

	/* Deinit any SOC level resource */
}

int ath11k_dp_alloc(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	spin_lock_init(&dp->reo_cmd_lock);

	dp->reo_cmd_cache_flush_count = 0;

	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath11k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_link_desc_cleanup;

	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath11k_hal_tx_set_dscp_tid_map(ab, i);

	/* Init any SOC level resource for DP */

	return 0;

fail_cmn_srng_cleanup:
	ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}

static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
								 t, timer);
	struct ath11k_base *ab = update_timer->ab;
	struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];

	spin_lock_bh(&srng->lock);

	/* When the timer fires, the handler checks whether any new TX
	 * happened during the timeout interval. If it did, the timer is
	 * re-armed; if not, the handler syncs the shadow HP/TP and stops
	 * the timer. The timer is started again when TX happens again.
	 */
	if (update_timer->timer_tx_num != update_timer->tx_num) {
		update_timer->timer_tx_num = update_timer->tx_num;
		mod_timer(&update_timer->timer, jiffies +
			  msecs_to_jiffies(update_timer->interval));
	} else {
		update_timer->started = false;
		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
	}

	spin_unlock_bh(&srng->lock);
}

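/* (Re)arm the shadow register update timer for a ring. Each TX bumps
 * tx_num; the timer handler compares it against the snapshot taken
 * here to detect whether traffic is still flowing before it syncs the
 * shadow HP/TP values.
 */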
void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
				  struct hal_srng *srng,
				  struct ath11k_hp_update_timer *update_timer)
{
	lockdep_assert_held(&srng->lock);

	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num++;

	if (update_timer->started)
		return;

	update_timer->started = true;
	update_timer->timer_tx_num = update_timer->tx_num;
	mod_timer(&update_timer->timer, jiffies +
		  msecs_to_jiffies(update_timer->interval));
}

void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	if (!update_timer->init)
		return;

	del_timer_sync(&update_timer->timer);
}

void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer,
				 u32 interval, u32 ring_id)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num = 0;
	update_timer->timer_tx_num = 0;
	update_timer->ab = ab;
	update_timer->ring_id = ring_id;
	update_timer->interval = interval;
	update_timer->init = true;
	timer_setup(&update_timer->timer,
		    ath11k_dp_shadow_timer_handler, 0);
}