/*
 * (c) Copyright 2002-2010, Ralink Technology, Inc.
 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/usb.h>
#include <linux/skbuff.h>

#include "mt76x0.h"
#include "dma.h"
#include "mcu.h"
#include "usb.h"
#include "trace.h"

#define MCU_FW_URB_MAX_PAYLOAD		0x38f8
#define MCU_FW_URB_SIZE			(MCU_FW_URB_MAX_PAYLOAD + 12)
#define MCU_RESP_URB_SIZE		1024

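/* The MCU firmware sets MT_MCU_COM_REG0 to 1 once it is up and running. */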
static inline int firmware_running(struct mt76x0_dev *dev)
{
	return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
}

static inline void skb_put_le32(struct sk_buff *skb, u32 val)
{
	put_unaligned_le32(val, skb_put(skb, 4));
}

static inline void mt76x0_dma_skb_wrap_cmd(struct sk_buff *skb,
					   u8 seq, enum mcu_cmd cmd)
{
	WARN_ON(mt76x0_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
				    FIELD_PREP(MT_TXD_CMD_SEQ, seq) |
				    FIELD_PREP(MT_TXD_CMD_TYPE, cmd)));
}

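/*
 * Compute a simple XOR checksum over the 32-bit words of the command for
 * the tracepoint; it is diagnostic only and is never sent to the MCU.
 */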
static inline void trace_mt76x0_mcu_msg_send_cs(struct mt76_dev *dev,
						struct sk_buff *skb,
						bool need_resp)
{
	u32 i, csum = 0;

	for (i = 0; i < skb->len / 4; i++)
		csum ^= get_unaligned_le32(skb->data + i * 4);

	trace_mt76x0_mcu_msg_send(dev, skb, csum, need_resp);
}

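/*
 * Allocate a command skb with headroom for the DMA header and four extra
 * tail bytes, apparently for the trailing zero padding added when the
 * command is wrapped for DMA.
 */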
static struct sk_buff *
mt76x0_mcu_msg_alloc(struct mt76x0_dev *dev, const void *data, int len)
{
	struct sk_buff *skb;

	WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */

	skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (skb) {
		skb_reserve(skb, MT_DMA_HDR_LEN);
		memcpy(skb_put(skb, len), data, len);
	}
	return skb;
}

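/*
 * Parse register values out of a CMD_RANDOM_READ/CMD_BURST_READ response.
 * The first four bytes hold the RX FCE info word and the last four bytes
 * are seemingly padding (hence buf += 4 and len -= 8). Burst reads return
 * bare values for consecutive registers; random reads return (reg, value)
 * pairs that must match the registers which were requested.
 */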
static void mt76x0_read_resp_regs(struct mt76x0_dev *dev, int len)
{
	int i;
	int n = dev->mcu.reg_pairs_len;
	u8 *buf = dev->mcu.resp.buf;

	buf += 4;
	len -= 8;

	if (dev->mcu.burst_read) {
		u32 reg = dev->mcu.reg_pairs[0].reg - dev->mcu.reg_base;

		WARN_ON_ONCE(len / 4 != n);
		for (i = 0; i < n; i++) {
			u32 val = get_unaligned_le32(buf + 4 * i);

			dev->mcu.reg_pairs[i].reg = reg++;
			dev->mcu.reg_pairs[i].value = val;
		}
	} else {
		WARN_ON_ONCE(len / 8 != n);
		for (i = 0; i < n; i++) {
			u32 reg = get_unaligned_le32(buf + 8 * i) -
				  dev->mcu.reg_base;
			u32 val = get_unaligned_le32(buf + 8 * i + 4);

			WARN_ON_ONCE(dev->mcu.reg_pairs[i].reg != reg);
			dev->mcu.reg_pairs[i].value = val;
		}
	}
}

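/*
 * Wait for the MCU event matching @seq, retrying up to five times with a
 * 300ms timeout each. The response urb is resubmitted after every event
 * so the next response can be received.
 */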
static int mt76x0_mcu_wait_resp(struct mt76x0_dev *dev, u8 seq)
{
	struct urb *urb = dev->mcu.resp.urb;
	u32 rxfce;
	int urb_status, ret, try = 5;

	while (try--) {
		if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
						 msecs_to_jiffies(300))) {
			dev_warn(dev->mt76.dev, "Warning: %s retrying\n",
				 __func__);
			continue;
		}

		/* Make copies of important data before reusing the urb */
		rxfce = get_unaligned_le32(dev->mcu.resp.buf);
		urb_status = urb->status * mt76x0_urb_has_error(urb);

		if (urb_status == 0 && dev->mcu.reg_pairs)
			mt76x0_read_resp_regs(dev, urb->actual_length);

		ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
					    &dev->mcu.resp, GFP_KERNEL,
					    mt76x0_complete_urb,
					    &dev->mcu.resp_cmpl);
		if (ret)
			return ret;

		if (urb_status)
			dev_err(dev->mt76.dev, "Error: MCU resp urb failed:%d\n",
				urb_status);

		if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
		    FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
			return 0;

		dev_err(dev->mt76.dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
			FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
			seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
	}

	dev_err(dev->mt76.dev, "Error: %s timed out\n", __func__);
	return -ETIMEDOUT;
}

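/*
 * Send a wrapped command over the in-band command bulk endpoint. Sequence
 * number 0 means "no response expected", so when a response is wanted the
 * sequence cycles through 1..15. Caller must hold dev->mcu.mutex.
 */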
static int
__mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
		      enum mcu_cmd cmd, bool wait_resp)
{
	struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
	unsigned int cmd_pipe = usb_sndbulkpipe(usb_dev,
						dev->out_ep[MT_EP_OUT_INBAND_CMD]);
	int sent, ret;
	u8 seq = 0;

	if (wait_resp)
		while (!seq)
			seq = ++dev->mcu.msg_seq & 0xf;

	mt76x0_dma_skb_wrap_cmd(skb, seq, cmd);

	if (dev->mcu.resp_cmpl.done)
		dev_err(dev->mt76.dev, "Error: MCU response pre-completed!\n");

	trace_mt76x0_mcu_msg_send_cs(&dev->mt76, skb, wait_resp);
	trace_mt76x0_submit_urb_sync(&dev->mt76, cmd_pipe, skb->len);

	ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
	if (ret) {
		dev_err(dev->mt76.dev, "Error: send MCU cmd failed:%d\n", ret);
		goto out;
	}
	if (sent != skb->len)
		dev_err(dev->mt76.dev, "Error: %s sent != skb->len\n", __func__);

	if (wait_resp)
		ret = mt76x0_mcu_wait_resp(dev, seq);

out:
	return ret;
}

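/* Locked wrapper around __mt76x0_mcu_msg_send() which consumes @skb. */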
static int
mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
		    enum mcu_cmd cmd, bool wait_resp)
{
	int ret;

	if (test_bit(MT76_REMOVED, &dev->mt76.state))
		return 0;

	mutex_lock(&dev->mcu.mutex);
	ret = __mt76x0_mcu_msg_send(dev, skb, cmd, wait_resp);
	mutex_unlock(&dev->mcu.mutex);

	consume_skb(skb);

	return ret;
}

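/*
 * CMD_FUN_SET_OP only generates a response event for function 5
 * (presumably ATOMIC_TSSI_SETTING in the matching mcu.h), so that is the
 * only case we wait for.
 */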
int mt76x0_mcu_function_select(struct mt76x0_dev *dev,
			       enum mcu_function func, u32 val)
{
	struct sk_buff *skb;
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(func),
		.value = cpu_to_le32(val),
	};

	skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76x0_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
}

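/*
 * Run an MCU calibration op and wait for it to complete. A typical call
 * (illustrative values only) would be:
 *
 *	ret = mt76x0_mcu_calibrate(dev, MCU_CAL_TXDCOC, 0);
 *	if (ret)
 *		return ret;
 *
 * where MCU_CAL_TXDCOC stands in for one of the enum mcu_calibrate
 * values from the matching mcu.h.
 */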
int
mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val)
{
	struct sk_buff *skb;
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(cal),
		.value = cpu_to_le32(val),
	};

	skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76x0_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
}

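/*
 * Write an array of register/value pairs via CMD_RANDOM_WRITE. Pairs that
 * do not fit in one in-band packet are sent in chunks by tail recursion;
 * only the final chunk waits for a response. Example with made-up
 * offsets, assuming a WLAN-mapped base:
 *
 *	static const struct mt76_reg_pair regs[] = {
 *		{ 0x1000, 0x12345678 },
 *		{ 0x1004, 0x9abcdef0 },
 *	};
 *
 *	ret = mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN,
 *				     regs, ARRAY_SIZE(regs));
 */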
int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
			   const struct mt76_reg_pair *data, int n)
{
	const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	ret = mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
	if (ret)
		return ret;

	return mt76x0_write_reg_pairs(dev, base, data + cnt, n - cnt);
}

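/*
 * Read back an array of register/value pairs via CMD_RANDOM_READ. Unlike
 * writes, reads cannot be chunked because the response parser fills
 * dev->mcu.reg_pairs in one go, so requests larger than one in-band
 * packet are rejected with -EINVAL. The pairs must have .reg filled in;
 * .value is overwritten with the data returned by the MCU.
 */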
int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
			  struct mt76_reg_pair *data, int n)
{
	const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);
	if (cnt != n)
		return -EINVAL;

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&dev->mcu.mutex);

	dev->mcu.reg_pairs = data;
	dev->mcu.reg_pairs_len = n;
	dev->mcu.reg_base = base;
	dev->mcu.burst_read = false;

	ret = __mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_READ, true);

	dev->mcu.reg_pairs = NULL;

	mutex_unlock(&dev->mcu.mutex);

	consume_skb(skb);

	return ret;
}

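/*
 * Write consecutive registers via CMD_BURST_WRITE: the first word of the
 * payload is the absolute start address, followed by the values. Larger
 * batches recurse with the offset advanced past the registers written.
 */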
int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
			    const u32 *data, int n)
{
	const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_regs_per_cmd, n);

	skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
	for (i = 0; i < cnt; i++)
		skb_put_le32(skb, data[i]);

	ret = mt76x0_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
	if (ret)
		return ret;

	return mt76x0_burst_write_regs(dev, offset + cnt * 4,
				       data + cnt, n - cnt);
}

#if 0
static int mt76x0_burst_read_regs(struct mt76x0_dev *dev, u32 base,
				  struct mt76_reg_pair *data, int n)
{
	const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
	struct sk_buff *skb;
	int cnt, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);
	if (cnt != n)
		return -EINVAL;

	skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	skb_put_le32(skb, base + data[0].reg);
	skb_put_le32(skb, n);

	mutex_lock(&dev->mcu.mutex);

	dev->mcu.reg_pairs = data;
	dev->mcu.reg_pairs_len = n;
	dev->mcu.reg_base = base;
	dev->mcu.burst_read = true;

	ret = __mt76x0_mcu_msg_send(dev, skb, CMD_BURST_READ, true);

	dev->mcu.reg_pairs = NULL;

	mutex_unlock(&dev->mcu.mutex);

	consume_skb(skb);

	return ret;
}
#endif

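/*
 * Layout of the MT7610 firmware image: a fixed header followed by the
 * interrupt vector block (IVB), the ILM (instruction local memory)
 * payload and the DLM (data local memory) payload. Note that hdr.ilm_len
 * covers the IVB as well.
 */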
struct mt76_fw_header {
	__le32 ilm_len;
	__le32 dlm_len;
	__le16 build_ver;
	__le16 fw_ver;
	u8 pad[4];
	char build_time[16];
};

struct mt76_fw {
	struct mt76_fw_header hdr;
	u8 ivb[MT_MCU_IVB_SIZE];
	u8 ilm[];
};

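/*
 * Push one chunk of firmware: prepend the TXD DMA info word, program the
 * FCE destination address and length via vendor requests, then send the
 * buffer over the in-band command endpoint and bump the FCE descriptor
 * index so the MCU consumes it.
 */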
static int __mt76x0_dma_fw(struct mt76x0_dev *dev,
			   const struct mt76x0_dma_buf *dma_buf,
			   const void *data, u32 len, u32 dst_addr)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	struct mt76x0_dma_buf buf = *dma_buf; /* we need to fake length */
	__le32 reg;
	u32 val;
	int ret;

	reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_COMMAND) |
			  FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
			  FIELD_PREP(MT_TXD_INFO_LEN, len));
	memcpy(buf.buf, &reg, sizeof(reg));
	memcpy(buf.buf + sizeof(reg), data, len);
	memset(buf.buf + sizeof(reg) + len, 0, 8);

	ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
				      MT_FCE_DMA_ADDR, dst_addr);
	if (ret)
		return ret;
	len = roundup(len, 4);
	ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
				      MT_FCE_DMA_LEN, len << 16);
	if (ret)
		return ret;

	buf.len = MT_DMA_HDR_LEN + len + 4;
	ret = mt76x0_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
				    &buf, GFP_KERNEL,
				    mt76x0_complete_urb, &cmpl);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
		dev_err(dev->mt76.dev, "Error: firmware upload timed out\n");
		usb_kill_urb(buf.urb);
		return -ETIMEDOUT;
	}
	if (mt76x0_urb_has_error(buf.urb)) {
		dev_err(dev->mt76.dev, "Error: firmware upload urb failed:%d\n",
			buf.urb->status);
		return buf.urb->status;
	}

	val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	msleep(5);

	return 0;
}

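/* Upload @len bytes to @dst_addr in MCU_FW_URB_MAX_PAYLOAD sized chunks. */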
static int
mt76x0_dma_fw(struct mt76x0_dev *dev, struct mt76x0_dma_buf *dma_buf,
	      const void *data, int len, u32 dst_addr)
{
	int n, ret;

	if (len == 0)
		return 0;

	n = min(MCU_FW_URB_MAX_PAYLOAD, len);
	ret = __mt76x0_dma_fw(dev, dma_buf, data, n, dst_addr);
	if (ret)
		return ret;

#if 0
	if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
		return -ETIMEDOUT;
#endif

	return mt76x0_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
}

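/*
 * Upload the ILM payload at offset MT_MCU_IVB_SIZE (the IVB itself is
 * held back), then the DLM at MT_MCU_DLM_OFFSET. Finally send the IVB
 * via vendor request 0x12, which starts the MCU, and poll up to one
 * second for the firmware to report itself running.
 */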
477 static int
mt76x0_upload_firmware(struct mt76x0_dev * dev,const struct mt76_fw * fw)478 mt76x0_upload_firmware(struct mt76x0_dev *dev, const struct mt76_fw *fw)
479 {
480 struct mt76x0_dma_buf dma_buf;
481 void *ivb;
482 u32 ilm_len, dlm_len;
483 int i, ret;
484
485 ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
486 if (!ivb)
487 return -ENOMEM;
488 if (mt76x0_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
489 ret = -ENOMEM;
490 goto error;
491 }
492
493 ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
494 dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %zu\n",
495 ilm_len, sizeof(fw->ivb));
496 ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
497 if (ret)
498 goto error;
499
500 dlm_len = le32_to_cpu(fw->hdr.dlm_len);
501 dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
502 ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
503 dlm_len, MT_MCU_DLM_OFFSET);
504 if (ret)
505 goto error;
506
507 ret = mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
508 0x12, 0, ivb, sizeof(fw->ivb));
509 if (ret < 0)
510 goto error;
511 ret = 0;
512
513 for (i = 100; i && !firmware_running(dev); i--)
514 msleep(10);
515 if (!i) {
516 ret = -ETIMEDOUT;
517 goto error;
518 }
519
520 dev_dbg(dev->mt76.dev, "Firmware running!\n");
521 error:
522 kfree(ivb);
523 mt76x0_usb_free_buf(dev, &dma_buf);
524
525 return ret;
526 }
527
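/*
 * Check whether firmware is already running; otherwise request the
 * MT7610_FIRMWARE image, validate its header and lengths, set up USB DMA
 * and the FCE, and upload it.
 */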
static int mt76x0_load_firmware(struct mt76x0_dev *dev)
{
	const struct firmware *fw;
	const struct mt76_fw_header *hdr;
	int len, ret;
	u32 val;

	mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
				      MT_USB_DMA_CFG_TX_BULK_EN));

	if (firmware_running(dev))
		return 0;

	ret = request_firmware(&fw, MT7610_FIRMWARE, dev->mt76.dev);
	if (ret)
		return ret;

	if (!fw || !fw->data || fw->size < sizeof(*hdr))
		goto err_inv_fw;

	hdr = (const struct mt76_fw_header *)fw->data;

	if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
		goto err_inv_fw;

	len = sizeof(*hdr);
	len += le32_to_cpu(hdr->ilm_len);
	len += le32_to_cpu(hdr->dlm_len);

	if (fw->size != len)
		goto err_inv_fw;

	val = le16_to_cpu(hdr->fw_ver);
	dev_dbg(dev->mt76.dev,
		"Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
		(val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
		le16_to_cpu(hdr->build_ver), hdr->build_time);

	len = le32_to_cpu(hdr->ilm_len);

	mt76_wr(dev, 0x1004, 0x2c);

	mt76_set(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
				       MT_USB_DMA_CFG_TX_BULK_EN) |
				      FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
	mt76x0_vendor_reset(dev);
	msleep(5);
/*
	mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
					MT_PBF_CFG_TX1Q_EN |
					MT_PBF_CFG_TX2Q_EN |
					MT_PBF_CFG_TX3Q_EN));
*/

	mt76_wr(dev, MT_FCE_PSE_CTRL, 1);

	/* FCE tx_fs_base_ptr */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
	/* FCE tx_fs_max_cnt */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
	/* FCE pdma enable */
	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
	/* FCE skip_fs_en */
	mt76_wr(dev, MT_FCE_SKIP_FS, 3);

	val = mt76_rr(dev, MT_USB_DMA_CFG);
	val |= MT_USB_DMA_CFG_TX_WL_DROP;
	mt76_wr(dev, MT_USB_DMA_CFG, val);
	val &= ~MT_USB_DMA_CFG_TX_WL_DROP;
	mt76_wr(dev, MT_USB_DMA_CFG, val);

	ret = mt76x0_upload_firmware(dev, (const struct mt76_fw *)fw->data);
	release_firmware(fw);

	mt76_wr(dev, MT_FCE_PSE_CTRL, 1);

	return ret;

err_inv_fw:
	dev_err(dev->mt76.dev, "Invalid firmware image\n");
	release_firmware(fw);
	return -ENOENT;
}

int mt76x0_mcu_init(struct mt76x0_dev *dev)
{
	int ret;

	mutex_init(&dev->mcu.mutex);

	ret = mt76x0_load_firmware(dev);
	if (ret)
		return ret;

	set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);

	return 0;
}

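/*
 * Select the in-band command queue, then allocate and submit the response
 * buffer so MCU events can be received.
 */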
int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev)
{
	int ret;

	ret = mt76x0_mcu_function_select(dev, Q_SELECT, 1);
	if (ret)
		return ret;

	init_completion(&dev->mcu.resp_cmpl);
	if (mt76x0_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
		mt76x0_usb_free_buf(dev, &dev->mcu.resp);
		return -ENOMEM;
	}

	ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
				    &dev->mcu.resp, GFP_KERNEL,
				    mt76x0_complete_urb, &dev->mcu.resp_cmpl);
	if (ret) {
		mt76x0_usb_free_buf(dev, &dev->mcu.resp);
		return ret;
	}

	return 0;
}

void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev)
{
	usb_kill_urb(dev->mcu.resp.urb);
	mt76x0_usb_free_buf(dev, &dev->mcu.resp);
}