// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_device *udev = to_usb_device(dev->dev);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

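/* mt76u_vendor_request - issue a vendor-specific control request.
 *
 * Serializes on usb_ctrl_mtx and retries transient errors up to
 * MT_VEND_REQ_MAX_RETRY times; returns the number of bytes transferred
 * or a negative error code.
 *
 * Illustrative use from a chip driver (the request code and register
 * offset below are placeholders, not values defined in this file):
 *
 *	__le32 reg;
 *
 *	ret = mt76u_vendor_request(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
 *				   0, offset, &reg, sizeof(reg));
 */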
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

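/* Register accesses are tunnelled through vendor requests: the high bits
 * of the address (MT_VEND_TYPE_MASK) select the request code to use
 * (EEPROM/CFG/multi read-write) and the remaining bits are sent as the
 * wIndex offset of the control transfer.
 */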
/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, &usb->reg_val, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = le32_to_cpu(usb->reg_val);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	usb->reg_val = cpu_to_le32(val);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, &usb->reg_val, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

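/* Read-modify-write helper: the read and the write are performed under a
 * single usb_ctrl_mtx hold, so the update cannot race with other register
 * accesses going through this path. Bits set in @mask are replaced by the
 * corresponding bits of @val.
 */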
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

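/* Copy a buffer to consecutive registers, one 32-bit word per vendor
 * request. Each word is staged in the preallocated usb->data scratch
 * buffer rather than handing the caller's memory to usb_control_msg()
 * directly; the loop stops on the first failed transfer.
 */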
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < DIV_ROUND_UP(len, 4); i++) {
		put_unaligned(val[i], (u32 *)usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(u32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

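/* Write a 32-bit value with two zero-length vendor requests: the low
 * 16 bits are carried in wValue at @offset, the high 16 bits at
 * @offset + 2.
 */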
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

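/* Register-pair bursts are routed through the MCU once the firmware is
 * running (MT76_STATE_MCU_RUNNING); before that they fall back to
 * individual vendor requests.
 */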
static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

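/* Scatter-gather is used only if the module parameter allows it, the host
 * controller advertises a non-zero sg_tablesize and either imposes no SG
 * length constraint or the device runs at USB_SPEED_WIRELESS.
 */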
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_device *udev = to_usb_device(dev->dev);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

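/* Fill the rx urb scatterlist with fragments from the queue page_frag
 * cache. Returns the number of entries actually filled (the urb transfer
 * length is sized accordingly) or -ENOMEM if no fragment could be
 * allocated.
 */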
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

	if (dev->usb.sg_en)
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

	urb->transfer_buffer_length = q->buf_size;
	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);

	return urb->transfer_buffer ? 0 : -ENOMEM;
}

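/* The urb and, when scatter-gather is enabled, its scatterlist are carved
 * out of a single allocation, so usb_free_urb() releases both;
 * usb_init_urb() initializes the refcount of the manually allocated urb.
 */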
static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
	int err;

	err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE);
	if (err)
		return err;

	return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE,
			       GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_device *udev = to_usb_device(dev->dev);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static inline struct urb *
mt76u_get_next_rx_entry(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->head].urb;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

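/* The first 16 bits of every rx buffer carry the DMA length. Reject the
 * entry if the buffer is shorter than the DMA/RXWI/FCE headers, if the
 * length is zero or not 32-bit aligned, or if the payload would overrun
 * the received data.
 */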
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

static struct sk_buff *
mt76u_build_rx_skb(void *data, int len, int buf_size)
{
	struct sk_buff *skb;

	if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
		data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN);
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				len - MT_SKB_HEAD_LEN, buf_size);

		return skb;
	}

	/* fast path */
	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, MT_DMA_HDR_LEN);
	__skb_put(skb, len);

	return skb;
}

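/* Turn a completed rx urb into an skb: the first segment (or the linear
 * buffer) provides the skb head, any remaining scatterlist segments are
 * attached as paged fragments and the result is handed to the driver rx
 * path. Returns the number of rx buffers consumed, so the caller knows
 * how many to refill.
 */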
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
	skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
	if (!skb)
		return 0;

	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
{
	mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
			    mt76u_complete_rx, dev);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

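/* Rx bottom half: drain the urbs completed by mt76u_complete_rx(), build
 * and deliver skbs, refill the consumed buffers, resubmit each urb and
 * finally kick mac80211 rx processing via mt76_rx_poll_complete().
 */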
static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct urb *urb;
	int err, count;

	rcu_read_lock();

	while (true) {
		urb = mt76u_get_next_rx_entry(dev);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb);
		if (count > 0) {
			err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, urb);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}

static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_usb *usb = &dev->usb;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err;

	usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
	if (!usb->mcu.data)
		return -ENOMEM;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_urb_free(q->entry[i].urb);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_poison_urb(q->entry[i].urb);

	tasklet_kill(&dev->usb.rx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_unpoison_urb(q->entry[i].urb);

	return mt76u_submit_rx_buffers(dev);
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

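/* Tx completion bottom half: for each AC queue, reap the entries whose
 * urbs have completed, report them via tx_complete_skb(), reschedule the
 * software queues, kick off the tx-status work and wake the mac80211
 * queue if it had been stopped.
 */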
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue_entry entry;
	struct mt76_sw_queue *sq;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		u32 n_dequeued = 0, n_sw_dequeued = 0;

		sq = &dev->q_tx[i];
		q = sq->q;

		while (q->queued > n_dequeued) {
			if (!q->entry[q->head].done)
				break;

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				n_sw_dequeued++;
			}

			entry = q->entry[q->head];
			q->entry[q->head].done = false;
			q->head = (q->head + 1) % q->ndesc;
			n_dequeued++;

			dev->drv->tx_complete_skb(dev, i, &entry);
		}

		spin_lock_bh(&q->lock);

		sq->swq_queued -= n_sw_dequeued;
		q->queued -= n_dequeued;

		wake = q->stopped && q->queued < q->ndesc - 8;
		if (wake)
			q->stopped = false;

		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		mt76_txq_schedule(dev, i);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	tasklet_schedule(&dev->tx_tasklet);
}

static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	}

	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
	if (!urb->num_sgs)
		return -ENOMEM;

	return urb->num_sgs;
}

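/* Queue an skb for transmission: the chip driver prepares it through
 * tx_prepare_skb(), the result is mapped into the entry's urb (linear or
 * scatter-gather) and the queue tail is advanced. The urb itself is only
 * submitted later, from mt76u_tx_kick().
 */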
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->tail;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->queued++;

	return idx;
}

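/* Submit every urb queued between q->first and q->tail; on -ENODEV the
 * device is flagged as removed and submission stops.
 */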
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->tail) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		INIT_LIST_HEAD(&dev->q_tx[i].swq);

		if (i >= IEEE80211_NUM_ACS) {
			dev->q_tx[i].q = dev->q_tx[0].q;
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76_ac_to_hwq(i);
		dev->q_tx[i].q = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i].q;
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].urb);
	}
}

void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	int i, j, ret;

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev),
				 HZ / 5);
	if (!ret) {
		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i].q;
			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		tasklet_kill(&dev->tx_tasklet);

		/* On device removal we might still queue skbs, but
		 * mt76u_tx_kick() will fail to submit the urbs; clean up
		 * those skbs manually.
		 */
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i].q;

			/* Make sure we are in sync with the killed tasklet. */
			spin_lock_bh(&q->lock);
			while (q->queued) {
				entry = q->entry[q->head];
				q->head = (q->head + 1) % q->ndesc;
				q->queued--;

				dev->drv->tx_complete_skb(dev, i, &entry);
			}
			spin_unlock_bh(&q->lock);
		}
	}

	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);

	mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

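/* Hook the USB backend into a mt76 device: register the vendor-request
 * based bus ops and the USB queue ops, set up the rx/tx tasklets and the
 * tx-status work, detect scatter-gather support and resolve the bulk
 * endpoints of @intf.
 */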
int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.write_copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	usb->sg_en = mt76u_check_sg(dev);

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");