1 /*
2 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/kernel.h>
18 #include <linux/firmware.h>
19 #include <linux/delay.h>
20
21 #include "mt76x2.h"
22 #include "mt76x2_mcu.h"
23 #include "mt76x2_dma.h"
24 #include "mt76x2_eeprom.h"
25
mt76x2_mcu_msg_alloc(const void * data,int len)26 static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
27 {
28 struct sk_buff *skb;
29
30 skb = alloc_skb(len, GFP_KERNEL);
31 if (!skb)
32 return NULL;
33 memcpy(skb_put(skb, len), data, len);
34
35 return skb;
36 }
37
38 static struct sk_buff *
mt76x2_mcu_get_response(struct mt76x2_dev * dev,unsigned long expires)39 mt76x2_mcu_get_response(struct mt76x2_dev *dev, unsigned long expires)
40 {
41 unsigned long timeout;
42
43 if (!time_is_after_jiffies(expires))
44 return NULL;
45
46 timeout = expires - jiffies;
47 wait_event_timeout(dev->mcu.wait, !skb_queue_empty(&dev->mcu.res_q),
48 timeout);
49 return skb_dequeue(&dev->mcu.res_q);
50 }
51
/*
 * mt76x2_mcu_msg_send - queue an MCU command and wait for its response
 * @dev: device state
 * @skb: command payload, as built by mt76x2_mcu_msg_alloc() (may be NULL
 *	 if that allocation failed)
 * @cmd: MCU command id
 *
 * Consumes @skb via the MCU TX queue. dev->mcu.mutex serializes commands
 * so only one is in flight at a time. Waits up to one second (HZ jiffies
 * from entry) for a response whose FCE sequence number matches the one
 * assigned here; responses carrying other sequence numbers are freed and
 * skipped.
 *
 * Returns 0 on success, -EINVAL for a NULL @skb, -ETIMEDOUT if no
 * matching response arrives in time, or the error from queueing.
 */
static int
mt76x2_mcu_msg_send(struct mt76x2_dev *dev, struct sk_buff *skb,
		    enum mcu_cmd cmd)
{
	unsigned long expires = jiffies + HZ;
	int ret;
	u8 seq;

	if (!skb)
		return -EINVAL;

	mutex_lock(&dev->mcu.mutex);

	/* 4-bit rolling sequence counter; 0 is never used as a tag */
	seq = ++dev->mcu.msg_seq & 0xf;
	if (!seq)
		seq = ++dev->mcu.msg_seq & 0xf;

	ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
	if (ret)
		goto out;

	while (1) {
		u32 *rxfce;
		bool check_seq = false;

		skb = mt76x2_mcu_get_response(dev, expires);
		if (!skb) {
			dev_err(dev->mt76.dev,
				"MCU message %d (seq %d) timed out\n", cmd,
				seq);
			ret = -ETIMEDOUT;
			break;
		}

		/* RX path stashes the FCE info word in skb->cb */
		rxfce = (u32 *) skb->cb;

		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
			check_seq = true;

		/* response consumed either way; stale seqs are discarded */
		dev_kfree_skb(skb);
		if (check_seq)
			break;
	}

out:
	mutex_unlock(&dev->mcu.mutex);

	return ret;
}
101
/*
 * mt76pci_load_rom_patch - upload the MCU ROM patch over PCI
 *
 * On chips other than MT7612 the patch area is guarded by hardware
 * semaphore 03, which is acquired here and released before returning.
 * If the "patch applied" bit is already set, the upload is skipped.
 *
 * Returns 0 on success (or when already applied), -ETIMEDOUT on
 * semaphore or patch-start timeout, -EIO for a malformed image, or a
 * request_firmware() error.
 */
static int
mt76pci_load_rom_patch(struct mt76x2_dev *dev)
{
	const struct firmware *fw = NULL;
	struct mt76x2_patch_header *hdr;
	bool rom_protect = !is_mt7612(dev);
	int len, ret = 0;
	__le32 *cur;
	u32 patch_mask, patch_reg;

	/* MT7612 needs no semaphore; all other chips must grab it first */
	if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
		dev_err(dev->mt76.dev,
			"Could not get hardware semaphore for ROM PATCH\n");
		return -ETIMEDOUT;
	}

	/* The "patch applied" flag lives in a different register on >= E3 */
	if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
		patch_mask = BIT(0);
		patch_reg = MT_MCU_CLOCK_CTL;
	} else {
		patch_mask = BIT(1);
		patch_reg = MT_MCU_COM_REG0;
	}

	if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
		dev_info(dev->mt76.dev, "ROM patch already applied\n");
		goto out;
	}

	ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
	if (ret)
		goto out;

	/* image must contain at least a header plus some payload */
	if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
		ret = -EIO;
		dev_err(dev->mt76.dev, "Failed to load firmware\n");
		goto out;
	}

	hdr = (struct mt76x2_patch_header *) fw->data;
	dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);

	/* map the patch region into the PCIe register window */
	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);

	cur = (__le32 *) (fw->data + sizeof(*hdr));
	len = fw->size - sizeof(*hdr);
	mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);

	/* restore the default window before kicking the MCU */
	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);

	/* Trigger ROM */
	mt76_wr(dev, MT_MCU_INT_LEVEL, 4);

	/* wait for the MCU to latch the patch-applied bit */
	if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
		dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
		ret = -ETIMEDOUT;
	}

out:
	/* release semaphore */
	if (rom_protect)
		mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
	release_firmware(fw);
	return ret;
}
167
/*
 * mt76pci_load_firmware - upload and start the main MCU firmware
 *
 * Validates the image header (total size must equal header + ILM + DLM
 * lengths), uploads the ILM and DLM sections through the PCIe remap
 * window, then triggers the MCU and waits for it to report running.
 *
 * Returns 0 on success, -ENOENT for an invalid image, -ETIMEDOUT if the
 * firmware does not start, or a request_firmware() error.
 */
static int
mt76pci_load_firmware(struct mt76x2_dev *dev)
{
	const struct firmware *fw;
	const struct mt76x2_fw_header *hdr;
	int len, ret;
	__le32 *cur;
	u32 offset, val;

	ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
	if (ret)
		return ret;

	if (!fw || !fw->data || fw->size < sizeof(*hdr))
		goto error;

	hdr = (const struct mt76x2_fw_header *) fw->data;

	/* total image size must exactly match the advertised sections */
	len = sizeof(*hdr);
	len += le32_to_cpu(hdr->ilm_len);
	len += le32_to_cpu(hdr->dlm_len);

	if (fw->size != len)
		goto error;

	/* version is packed as 4-bit major/minor/patch nibbles */
	val = le16_to_cpu(hdr->fw_ver);
	dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);

	val = le16_to_cpu(hdr->build_ver);
	dev_info(dev->mt76.dev, "Build: %x\n", val);
	dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);

	/* ILM section follows the header directly */
	cur = (__le32 *) (fw->data + sizeof(*hdr));
	len = le32_to_cpu(hdr->ilm_len);

	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
	mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);

	/* DLM section follows the ILM; its address moved on rev >= E3 */
	cur += len / sizeof(*cur);
	len = le32_to_cpu(hdr->dlm_len);

	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
		offset = MT_MCU_DLM_ADDR_E3;
	else
		offset = MT_MCU_DLM_ADDR;

	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
	mt76_wr_copy(dev, offset, cur, len);

	/* restore the default remap window */
	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);

	/* XTAL option 1 boards need this bit set before start */
	val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
	if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
		mt76_set(dev, MT_MCU_COM_REG0, BIT(30));

	/* trigger firmware */
	mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
	if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 200)) {
		dev_err(dev->mt76.dev, "Firmware failed to start\n");
		release_firmware(fw);
		return -ETIMEDOUT;
	}

	dev_info(dev->mt76.dev, "Firmware running!\n");

	release_firmware(fw);

	/* ret is 0 here: request_firmware() succeeded */
	return ret;

error:
	dev_err(dev->mt76.dev, "Invalid firmware\n");
	release_firmware(fw);
	return -ENOENT;
}
243
244 static int
mt76x2_mcu_function_select(struct mt76x2_dev * dev,enum mcu_function func,u32 val)245 mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
246 u32 val)
247 {
248 struct sk_buff *skb;
249 struct {
250 __le32 id;
251 __le32 value;
252 } __packed __aligned(4) msg = {
253 .id = cpu_to_le32(func),
254 .value = cpu_to_le32(val),
255 };
256
257 skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
258 return mt76x2_mcu_msg_send(dev, skb, CMD_FUN_SET_OP);
259 }
260
/* Ask the MCU to load a CR table for the given mode/temperature/channel,
 * passing NIC configuration bytes from the EEPROM in the cfg word.
 */
int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
		       u8 channel)
{
	struct {
		u8 cr_mode;
		u8 temp;
		u8 ch;
		u8 _pad0;

		__le32 cfg;
	} __packed __aligned(4) msg = {
		.cr_mode = type,
		.temp = temp_level,
		.ch = channel,
	};
	struct sk_buff *skb;
	u32 cfg_val = BIT(31);

	/* cfg[7:0] = NIC_CONF_0 high byte, cfg[15:8] = NIC_CONF_1 low byte */
	cfg_val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
	cfg_val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
	msg.cfg = cpu_to_le32(cfg_val);

	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
	return mt76x2_mcu_msg_send(dev, skb, CMD_LOAD_CR);
}
288
/*
 * mt76x2_mcu_set_channel - switch the MCU to a new channel
 * @dev: device state
 * @channel: channel number
 * @bw: bandwidth setting
 * @bw_index: extension channel index (encoded as 0xe0 + bw_index)
 * @scan: whether this switch is part of a scan
 *
 * The switch is done in two steps: first without the extension channel
 * info, then again with it after a short settle delay.
 *
 * Returns 0 on success or a negative error from either command.
 */
int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
			   u8 bw_index, bool scan)
{
	struct sk_buff *skb;
	struct {
		u8 idx;
		u8 scan;
		u8 bw;
		u8 _pad0;

		__le16 chainmask;
		u8 ext_chan;
		u8 _pad1;

	} __packed __aligned(4) msg = {
		.idx = channel,
		.scan = scan,
		.bw = bw,
		.chainmask = cpu_to_le16(dev->chainmask),
	};
	int ret;

	/* first set the channel without the extension channel info */
	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
	ret = mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
	if (ret)
		return ret;

	usleep_range(5000, 10000);

	/* then switch again, this time including the extension channel */
	msg.ext_chan = 0xe0 + bw_index;
	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
	return mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
}
320
/* Turn the radio on or off via CMD_POWER_SAVING_OP. */
int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on)
{
	struct {
		__le32 mode;
		__le32 level;
	} __packed __aligned(4) msg = {
		.mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
		.level = cpu_to_le32(0),
	};
	struct sk_buff *skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));

	return mt76x2_mcu_msg_send(dev, skb, CMD_POWER_SAVING_OP);
}
335
/* Run an MCU calibration of the given @type with parameter @param and
 * wait for the MCU to signal completion via MT_MCU_COM_REG0 bit 31.
 */
int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
			 u32 param)
{
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(type),
		.value = cpu_to_le32(param),
	};
	struct sk_buff *skb;
	int err;

	/* clear the completion bit so we can detect when the MCU sets it */
	mt76_clear(dev, MT_MCU_COM_REG0, BIT(31));

	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
	err = mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
	if (err)
		return err;

	if (WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
				    BIT(31), BIT(31), 100)))
		return -ETIMEDOUT;

	return 0;
}
362
mt76x2_mcu_tssi_comp(struct mt76x2_dev * dev,struct mt76x2_tssi_comp * tssi_data)363 int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
364 struct mt76x2_tssi_comp *tssi_data)
365 {
366 struct sk_buff *skb;
367 struct {
368 __le32 id;
369 struct mt76x2_tssi_comp data;
370 } __packed __aligned(4) msg = {
371 .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
372 .data = *tssi_data,
373 };
374
375 skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
376 return mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
377 }
378
/* Program the initial gain for @channel; @force sets bit 31 of the
 * channel word to make the MCU apply it unconditionally.
 */
int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
			 bool force)
{
	u32 ch_val = channel;
	struct sk_buff *skb;
	struct {
		__le32 channel;
		__le32 gain_val;
	} __packed __aligned(4) msg;

	if (force)
		ch_val |= BIT(31);

	msg.channel = cpu_to_le32(ch_val);
	msg.gain_val = cpu_to_le32(gain);

	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
	return mt76x2_mcu_msg_send(dev, skb, CMD_INIT_GAIN_OP);
}
397
mt76x2_mcu_init(struct mt76x2_dev * dev)398 int mt76x2_mcu_init(struct mt76x2_dev *dev)
399 {
400 int ret;
401
402 mutex_init(&dev->mcu.mutex);
403
404 ret = mt76pci_load_rom_patch(dev);
405 if (ret)
406 return ret;
407
408 ret = mt76pci_load_firmware(dev);
409 if (ret)
410 return ret;
411
412 mt76x2_mcu_function_select(dev, Q_SELECT, 1);
413 return 0;
414 }
415
mt76x2_mcu_cleanup(struct mt76x2_dev * dev)416 int mt76x2_mcu_cleanup(struct mt76x2_dev *dev)
417 {
418 struct sk_buff *skb;
419
420 mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
421 usleep_range(20000, 30000);
422
423 while ((skb = skb_dequeue(&dev->mcu.res_q)) != NULL)
424 dev_kfree_skb(skb);
425
426 return 0;
427 }
428