/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* DXE - DMA transfer engine
 * We have two channels (high prio and low prio) for TX and two channels
 * for RX. Data packets are transferred through the low-priority channels,
 * management packets through the high-priority channels.
 */
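
/*
 * Overview, as a reading aid (derived from this file; the pairing of DXE
 * hardware channel numbers to the CHx interrupt masks is taken from how
 * wcn36xx_dxe_rx_frame() and wcn36xx_dxe_init() use them):
 *
 *   dxe_tx_l_ch - TX low prio,  data frames,       reaped on the TX_L IRQ
 *   dxe_tx_h_ch - TX high prio, management frames, reaped on the TX_H IRQ
 *   dxe_rx_l_ch - RX low prio,  data frames,       CH1 interrupt mask
 *   dxe_rx_h_ch - RX high prio, management frames, CH3 interrupt mask
 *
 * Each channel is a circular ring of wcn36xx_dxe_ctl blocks, each of which
 * owns one hardware descriptor (wcn36xx_dxe_desc).
 */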

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/soc/qcom/smem_state.h>
#include "wcn36xx.h"
#include "txrx.h"

static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->ccu_base + addr);
}

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->dxe_base + addr);
}

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->dxe_base + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}

static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}

static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	spin_lock_init(&ch->lock);
	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		cur_ctl->ctl_blk_order = i;
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (i == ch->desc_num - 1) {
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}

	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}
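
/*
 * After wcn36xx_dxe_allocate_ctl_block() the control blocks form a circular
 * singly linked list (sketch for desc_num == 4):
 *
 *   head_blk_ctl --> ctl[0] --> ctl[1] --> ctl[2] --> ctl[3]
 *                      ^                                |
 *                      +--------------------------------+
 *
 * head_blk_ctl and tail_blk_ctl both start at ctl[0]; the TX reap and RX
 * refill paths below advance them around the ring.
 */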

int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize SMSM state: clear TX_ENABLE, set TX_RINGS_EMPTY */
	ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
					  WCN36XX_SMSM_WLAN_TX_ENABLE |
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
	if (ret)
		goto out_err;

	return 0;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}

void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}

static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}
		if (i == 0) {
			cur_dxe->phy_next_l = 0;
		} else if (i < wcn_ch->desc_num - 1) {
			prev_dxe->phy_next_l = cur_ctl->desc_phy_addr;
		} else {
			/* Last descriptor: link it back to the head to
			 * close the ring.
			 */
			prev_dxe->phy_next_l = cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l =
				wcn_ch->head_blk_ctl->desc_phy_addr;
		}
		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}
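
/*
 * The hardware-visible counterpart of the ctl ring: all descriptors sit in
 * one dma_alloc_coherent() area and are chained by bus address through
 * phy_next_l into a ring of the same shape (sketch for desc_num == 3):
 *
 *   desc[0] --phy_next_l--> desc[1] --> desc[2] --> desc[0]
 *
 * Each ctl block caches both the CPU pointer (ctl->desc) and the bus
 * address (ctl->desc_phy_addr) of its descriptor, so the reap/refill code
 * never has to translate between the two.
 */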

static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	size_t size;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
}

static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/* Only every second dxe needs a bd pointer; the others
		 * point to the skb data.
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}
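
/*
 * A TX ring therefore alternates descriptor roles, and every frame
 * consumes one (BD, skb) descriptor pair (sketch):
 *
 *   ctl[0]: bd_cpu_addr = pool chunk 0  <- carries the wcn36xx_tx_bd header
 *   ctl[1]: bd_cpu_addr = NULL          <- carries the mapped skb payload
 *   ctl[2]: bd_cpu_addr = pool chunk 1
 *   ctl[3]: bd_cpu_addr = NULL
 *
 * wcn36xx_dxe_tx_frame() depends on this pairing: it writes the BD through
 * head_blk_ctl and maps the skb through head_blk_ctl->next.
 */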

static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data |= wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
	return 0;
}

static int wcn36xx_dxe_fill_skb(struct device *dev,
				struct wcn36xx_dxe_ctl *ctl,
				gfp_t gfp)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
	if (!skb)
		return -ENOMEM;

	dxe->dst_addr_l = dma_map_single(dev,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dxe->dst_addr_l)) {
		dev_err(dev, "unable to map skb\n");
		kfree_skb(skb);
		return -ENOMEM;
	}
	ctl->skb = skb;

	return 0;
}

static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	int i;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;

	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
		cur_ctl = cur_ctl->next;
	}

	return 0;
}

static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}

void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}

static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Make at least one pass of the do-while loop: when the ring is
	 * completely full, head and tail point to the same element, so a
	 * plain while loop would not iterate at all.
	 */
	spin_lock_irqsave(&ch->lock, flags);
	ctl = ch->tail_blk_ctl;
	do {
		if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
			break;

		if (ctl->skb &&
		    READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				/* No TX status was requested, so the frame
				 * can be freed here; otherwise it is kept
				 * until the TX status indication arrives.
				 */
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}

			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl);

	ch->tail_blk_ctl = ctl;
	spin_unlock_irqrestore(&ch->lock, flags);
}
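
/*
 * Reap window illustration: descriptors between tail_blk_ctl and
 * head_blk_ctl have been handed to the hardware, and the engine clears
 * the VLD bit in desc->ctrl once it is done with a descriptor (this is
 * what the loop above relies on). The walk starts at the tail and stops
 * at the first descriptor that is still marked valid, i.e. still owned
 * by the hardware:
 *
 *   tail -> [VLD=0] [VLD=0] [VLD=1] ... -> head
 *            reaped  reaped  stop here
 */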

static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ERR_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);

			wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
				    int_src);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_DONE_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ED_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);
		}

		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
			    int_reason);

		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
				  WCN36XX_CH_STAT_INT_ED_MASK))
			reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ERR_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);

			wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
				    int_src);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_DONE_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ED_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);
		}

		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
			    int_reason);

		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
				  WCN36XX_CH_STAT_INT_ED_MASK))
			reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
	}

	return IRQ_HANDLED;
}

static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;

	wcn36xx_dxe_rx_frame(wcn);

	return IRQ_HANDLED;
}

static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

	enable_irq_wake(wcn->rx_irq);

	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}

static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch,
				     u32 ctrl,
				     u32 en_mask,
				     u32 int_mask,
				     u32 status_reg)
{
	struct wcn36xx_dxe_desc *dxe;
	struct wcn36xx_dxe_ctl *ctl;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u32 int_reason;
	int ret;

	wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);

	if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_ERR_CLR,
					   int_mask);

		wcn36xx_err("DXE IRQ reported error on RX channel\n");
	}

	if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_DONE_CLR,
					   int_mask);

	if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_ED_CLR,
					   int_mask);

	if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
			    WCN36XX_CH_STAT_INT_ED_MASK)))
		return 0;

	spin_lock(&ch->lock);

	ctl = ch->head_blk_ctl;
	dxe = ctl->desc;

	while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
		if (!ret) {
			/* New skb allocation succeeded: use the new one for
			 * DMA and hand the old one up to the network stack.
			 */
			dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
			wcn36xx_rx_skb(wcn, skb);
		} /* else keep the old skb (not submitted) and reuse it for RX DMA */

		dxe->ctrl = ctrl;
		ctl = ctl->next;
		dxe = ctl->desc;
	}
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);

	ch->head_blk_ctl = ctl;

	spin_unlock(&ch->lock);

	return 0;
}
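
/*
 * Note on the refill strategy above: the replacement buffer is allocated
 * with GFP_ATOMIC because this runs from the RX interrupt path. When the
 * allocation fails, the received frame is dropped and its old buffer is
 * re-armed for DMA, so the ring never loses a descriptor under memory
 * pressure; only that one packet is lost.
 */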

void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK)
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
					  WCN36XX_DXE_CTRL_RX_L,
					  WCN36XX_DXE_INT_CH1_MASK,
					  WCN36XX_INT_MASK_CHAN_RX_L,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK)
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
					  WCN36XX_DXE_CTRL_RX_H,
					  WCN36XX_DXE_INT_CH3_MASK,
					  WCN36XX_INT_MASK_CHAN_RX_H,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}

int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	size_t s;
	void *cpu_addr;

	/* Allocate BD headers for MGMT frames */

	/* Where this comes from, ask QC */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(wcn->dev, s,
				      &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->mgmt_mem_pool.virt_addr = cpu_addr;

	/* Allocate BD headers for DATA frames */

	/* Where this comes from, ask QC */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(wcn->dev, s,
				      &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->data_mem_pool.virt_addr = cpu_addr;

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}
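
/*
 * About the chunk_size formula above: adding 16 - (WCN36XX_BD_CHUNK_SIZE
 * % 8) rounds each chunk up to a multiple of 8 bytes with between 9 and
 * 16 bytes of padding past the BD itself. That reading is an assumption;
 * per the "ask QC" notes, the constant was inherited from the vendor
 * driver without documentation.
 */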

void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr)
		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);

	if (wcn->data_mem_pool.virt_addr)
		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
}

int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct wcn36xx_tx_bd *bd,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
	struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;
	int ret;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	spin_lock_irqsave(&ch->lock, flags);
	ctl_bd = ch->head_blk_ctl;
	ctl_skb = ctl_bd->next;

	/*
	 * If the skb is not NULL, we have reached the tail of the ring,
	 * hence the ring is full. Stop the queues to let mac80211 back off
	 * until the ring has an empty slot again.
	 */
	if (ctl_skb->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock_irqrestore(&ch->lock, flags);
		return -EBUSY;
	}

	if (unlikely(ctl_skb->bd_cpu_addr)) {
		wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
		ret = -EINVAL;
		goto unlock;
	}

	desc_bd = ctl_bd->desc;
	desc_skb = ctl_skb->desc;

	ctl_bd->skb = NULL;

	/* Write buffer descriptor */
	memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));

	/* Set source address of the BD we send */
	desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
	desc_bd->dst_addr_l = ch->dxe_wq;
	desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc_bd, sizeof(*desc_bd));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD >>> ", (char *)ctl_bd->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	desc_skb->src_addr_l = dma_map_single(wcn->dev,
					      skb->data,
					      skb->len,
					      DMA_TO_DEVICE);
	if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
		dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
		ret = -ENOMEM;
		goto unlock;
	}

	ctl_skb->skb = skb;
	desc_skb->dst_addr_l = ch->dxe_wq;
	desc_skb->fr_len = ctl_skb->skb->len;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc_skb, sizeof(*desc_skb));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
			 (char *)ctl_skb->skb->data, ctl_skb->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl_skb->next;

	/* Commit all previous writes and set descriptors to VALID */
	wmb();
	desc_skb->ctrl = ch->ctrl_skb;
	wmb();
	desc_bd->ctrl = ch->ctrl_bd;

	/*
	 * When connected, the chip can be in sleep mode while we try to
	 * send a data frame, and writing to the register will not wake it
	 * up. Notify the chip about the new frame through the SMSM bus
	 * instead.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
					    WCN36XX_SMSM_WLAN_TX_ENABLE,
					    WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* Indicate End Of Packet and generate an interrupt on
		 * descriptor done.
		 */
		wcn36xx_dxe_write_register(wcn,
					   ch->reg_ctrl, ch->def_ctrl);
	}

	ret = 0;
unlock:
	spin_unlock_irqrestore(&ch->lock, flags);
	return ret;
}
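
/*
 * Ordering note for the submit path above: the skb descriptor is marked
 * valid before the BD descriptor, with a write barrier between the two
 * stores, so by the time the engine observes a valid BD descriptor, the
 * companion payload descriptor behind it is already armed:
 *
 *   wmb(); desc_skb->ctrl = ch->ctrl_skb;  (payload descriptor ready)
 *   wmb(); desc_bd->ctrl  = ch->ctrl_bd;   (engine may start the pair)
 */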

int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Select channels for rx avail and xfer done interrupts... */
	reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
		    WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
	if (wcn->is_pronto)
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
	else
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);

	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		return ret;
	}
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
				   wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_TX_L,
				   WCN36XX_DXE_WQ_TX_L);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	/****************************************/
	/* Init descriptors for TX HIGH channel */
	/****************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_txh_ch;
	}

	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
				   wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_TX_H,
				   WCN36XX_DXE_WQ_TX_H);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_rxl_ch;
	}

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
				   wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_SRC_ADDR_RX_L,
				   WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_RX_L,
				   wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_REG_CTL_RX_L,
				   WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

	/****************************************/
	/* Init descriptors for RX HIGH channel */
	/****************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_rxh_ch;
	}

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
				   wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_SRC_ADDR_RX_H,
				   WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_RX_H,
				   wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_REG_CTL_RX_H,
				   WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err_irq;

	return 0;

out_err_irq:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
out_err_rxh_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
out_err_rxl_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
out_err_txh_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);

	return ret;
}

void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}