// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 MediaTek Inc.

/*
 * Bluetooth support for MediaTek serial devices
 *
 * Author: Sean Wang <sean.wang@mediatek.com>
 *
 */

#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "h4_recv.h"
#include "btmtk.h"

#define VERSION "0.2"

#define MTK_STP_TLR_SIZE	2

#define BTMTKUART_TX_STATE_ACTIVE	1
#define BTMTKUART_TX_STATE_WAKEUP	2
#define BTMTKUART_TX_WAIT_VND_EVT	3
#define BTMTKUART_REQUIRED_WAKEUP	4

#define BTMTKUART_FLAG_STANDALONE_HW	BIT(0)

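/* MediaTek Serial Transport Protocol (STP) framing, as handled below: every
 * UART frame carries a 4-byte header (prefix 0x80, a big-endian length field
 * of which only the low 12 bits are used, and a checksum byte the MT7622
 * ignores), followed by the H:4 payload and a 2-byte trailer
 * (MTK_STP_TLR_SIZE).
 */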
struct mtk_stp_hdr {
	u8	prefix;
	__be16	dlen;
	u8	cs;
} __packed;

struct btmtkuart_data {
	unsigned int flags;
	const char *fwname;
};

struct btmtkuart_dev {
	struct hci_dev *hdev;
	struct serdev_device *serdev;

	struct clk *clk;
	struct clk *osc;
	struct regulator *vcc;
	struct gpio_desc *reset;
	struct gpio_desc *boot;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_runtime;
	struct pinctrl_state *pins_boot;
	speed_t	desired_speed;
	speed_t	curr_speed;

	struct work_struct tx_work;
	unsigned long	tx_state;
	struct sk_buff_head txq;

	struct sk_buff *rx_skb;
	struct sk_buff *evt_skb;

	u8	stp_pad[6];
	u8	stp_cursor;
	u16	stp_dlen;

	const struct btmtkuart_data *data;
};

#define btmtkuart_is_standalone(bdev)	\
	((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)
#define btmtkuart_is_builtin_soc(bdev)	\
	!((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)

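/* Send a WMT command as HCI vendor command 0xfc6f and block until the
 * matching vendor event arrives. btmtkuart_recv_event() stores the reply in
 * bdev->evt_skb and clears BTMTKUART_TX_WAIT_VND_EVT, which is what the wait
 * below is keyed on.
 */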
static int mtk_hci_wmt_sync(struct hci_dev *hdev,
			    struct btmtk_hci_wmt_params *wmt_params)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
	u32 hlen, status = BTMTK_WMT_INVALID;
	struct btmtk_hci_wmt_evt *wmt_evt;
	struct btmtk_hci_wmt_cmd *wc;
	struct btmtk_wmt_hdr *hdr;
	int err;

	/* Send the WMT command and wait until the WMT event returns */
	hlen = sizeof(*hdr) + wmt_params->dlen;
	if (hlen > 255) {
		err = -EINVAL;
		goto err_free_skb;
	}

	wc = kzalloc(hlen, GFP_KERNEL);
	if (!wc) {
		err = -ENOMEM;
		goto err_free_skb;
	}

	hdr = &wc->hdr;
	hdr->dir = 1;
	hdr->op = wmt_params->op;
	hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
	hdr->flag = wmt_params->flag;
	memcpy(wc->data, wmt_params->data, wmt_params->dlen);

	set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);

	err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
	if (err < 0) {
		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
		goto err_free_wc;
	}

	/* The vendor specific WMT commands are all answered by a vendor
	 * specific event and will not have the Command Status or Command
	 * Complete as with usual HCI command flow control.
	 *
	 * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT
	 * state to be cleared. The driver specific event receive routine
	 * will clear that state and with that indicate completion of the
	 * WMT command.
	 */
	err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT,
				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
	if (err == -EINTR) {
		bt_dev_err(hdev, "Execution of wmt command interrupted");
		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
		goto err_free_wc;
	}

	if (err) {
		bt_dev_err(hdev, "Execution of wmt command timed out");
		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
		err = -ETIMEDOUT;
		goto err_free_wc;
	}

	/* Parse and handle the return WMT event */
	wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
	if (wmt_evt->whdr.op != hdr->op) {
		bt_dev_err(hdev, "Wrong op received %d expected %d",
			   wmt_evt->whdr.op, hdr->op);
		err = -EIO;
		goto err_free_wc;
	}

	switch (wmt_evt->whdr.op) {
	case BTMTK_WMT_SEMAPHORE:
		if (wmt_evt->whdr.flag == 2)
			status = BTMTK_WMT_PATCH_UNDONE;
		else
			status = BTMTK_WMT_PATCH_DONE;
		break;
	case BTMTK_WMT_FUNC_CTRL:
		wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
		if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
			status = BTMTK_WMT_ON_DONE;
		else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
			status = BTMTK_WMT_ON_PROGRESS;
		else
			status = BTMTK_WMT_ON_UNDONE;
		break;
	}

	if (wmt_params->status)
		*wmt_params->status = status;

err_free_wc:
	kfree(wc);
err_free_skb:
	kfree_skb(bdev->evt_skb);
	bdev->evt_skb = NULL;

	return err;
}

static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct hci_event_hdr *hdr = (void *)skb->data;
	int err;

	/* When someone is waiting for the WMT event, clone the skb so that
	 * the waiter can process the event from there afterwards.
	 */
	if (test_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state)) {
		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
		if (!bdev->evt_skb) {
			err = -ENOMEM;
			goto err_out;
		}
	}

	err = hci_recv_frame(hdev, skb);
	if (err < 0)
		goto err_free_skb;

	if (hdr->evt == HCI_EV_WMT) {
		if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
				       &bdev->tx_state)) {
			/* Barrier to sync with other CPUs */
			smp_mb__after_atomic();
			wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT);
		}
	}

	return 0;

err_free_skb:
	kfree_skb(bdev->evt_skb);
	bdev->evt_skb = NULL;

err_out:
	return err;
}

static const struct h4_recv_pkt mtk_recv_pkts[] = {
	{ H4_RECV_ACL,      .recv = hci_recv_frame },
	{ H4_RECV_SCO,      .recv = hci_recv_frame },
	{ H4_RECV_EVENT,    .recv = btmtkuart_recv_event },
};

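/* TX worker: drain the queue until it is empty. BTMTKUART_TX_STATE_ACTIVE
 * marks the worker as running and BTMTKUART_TX_STATE_WAKEUP records a wakeup
 * that arrived while it was running, so the outer loop re-scans the queue
 * instead of dropping that wakeup.
 */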
static void btmtkuart_tx_work(struct work_struct *work)
{
	struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
						  tx_work);
	struct serdev_device *serdev = bdev->serdev;
	struct hci_dev *hdev = bdev->hdev;

	while (1) {
		clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);

		while (1) {
			struct sk_buff *skb = skb_dequeue(&bdev->txq);
			int len;

			if (!skb)
				break;

			len = serdev_device_write_buf(serdev, skb->data,
						      skb->len);
			hdev->stat.byte_tx += len;

			skb_pull(skb, len);
			if (skb->len > 0) {
				skb_queue_head(&bdev->txq, skb);
				break;
			}

			switch (hci_skb_pkt_type(skb)) {
			case HCI_COMMAND_PKT:
				hdev->stat.cmd_tx++;
				break;
			case HCI_ACLDATA_PKT:
				hdev->stat.acl_tx++;
				break;
			case HCI_SCODATA_PKT:
				hdev->stat.sco_tx++;
				break;
			}

			kfree_skb(skb);
		}

		if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state))
			break;
	}

	clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state);
}

static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev)
{
	if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state))
		set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);

	schedule_work(&bdev->tx_work);
}

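/* Strip STP framing from the incoming byte stream. Up to 6 bytes (the
 * previous frame's 2-byte trailer plus the 4-byte STP header) are collected
 * in bdev->stp_pad; once the header is complete, the return value points at
 * the H:4 payload and *sz_h4 is set to how many of the remaining bytes
 * belong to the current STP frame. NULL is returned when no payload byte is
 * available yet.
 */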
static const unsigned char *
mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
	      int *sz_h4)
{
	struct mtk_stp_hdr *shdr;

	/* The cursor is reset once all data of the STP frame is consumed */
	if (!bdev->stp_dlen && bdev->stp_cursor >= 6)
		bdev->stp_cursor = 0;

	/* Fill the pad until all STP info is obtained */
	while (bdev->stp_cursor < 6 && count > 0) {
		bdev->stp_pad[bdev->stp_cursor] = *data;
		bdev->stp_cursor++;
		data++;
		count--;
	}

	/* Retrieve STP info and have a sanity check */
	if (!bdev->stp_dlen && bdev->stp_cursor >= 6) {
		shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2];
		bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff;

		/* Resync STP when unexpected data is being read */
		if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
			bt_dev_err(bdev->hdev, "stp format unexpected (%d, %d)",
				   shdr->prefix, bdev->stp_dlen);
			bdev->stp_cursor = 2;
			bdev->stp_dlen = 0;
		}
	}

	/* Quit directly when there is no data left for H4 to process */
	if (count <= 0)
		return NULL;

	/* Translate to the size of data H4 can handle so far */
	*sz_h4 = min_t(int, count, bdev->stp_dlen);

	/* Update the remaining size of STP packet */
	bdev->stp_dlen -= *sz_h4;

	/* Data points to STP payload which can be handled by H4 */
	return data;
}

static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	const unsigned char *p_left = data, *p_h4;
	int sz_left = count, sz_h4, adv;
	int err;

	while (sz_left > 0) {
		/* The serial data received from the MT7622 BT controller is
		 * at all times wrapped in an STP header and trailer.
		 *
		 * A full STP packet looks like
		 * -------------------------------------
		 * | STP header |   H:4   | STP trailer |
		 * -------------------------------------
		 * but it is not guaranteed to contain a full H:4 packet:
		 * multiple STP packets may together form one H:4 packet,
		 * so frames can be fragmented. The length recorded in an
		 * STP header only tells how much data the H:4 engine can
		 * handle at the moment.
		 */

		p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4);
		if (!p_h4)
			break;

		adv = p_h4 - p_left;
		sz_left -= adv;
		p_left += adv;

		bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
					   sz_h4, mtk_recv_pkts,
					   ARRAY_SIZE(mtk_recv_pkts));
		if (IS_ERR(bdev->rx_skb)) {
			err = PTR_ERR(bdev->rx_skb);
			bt_dev_err(bdev->hdev,
				   "Frame reassembly failed (%d)", err);
			bdev->rx_skb = NULL;
			return err;
		}

		sz_left -= sz_h4;
		p_left += sz_h4;
	}

	return 0;
}

static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
				 size_t count)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
	int err;

	err = btmtkuart_recv(bdev->hdev, data, count);
	if (err < 0)
		return err;

	bdev->hdev->stat.byte_rx += count;

	return count;
}

static void btmtkuart_write_wakeup(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);

	btmtkuart_tx_wakeup(bdev);
}

static const struct serdev_device_ops btmtkuart_client_ops = {
	.receive_buf = btmtkuart_receive_buf,
	.write_wakeup = btmtkuart_write_wakeup,
};

static int btmtkuart_open(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct device *dev;
	int err;

	err = serdev_device_open(bdev->serdev);
	if (err) {
		bt_dev_err(hdev, "Unable to open UART device %s",
			   dev_name(&bdev->serdev->dev));
		goto err_open;
	}

	if (btmtkuart_is_standalone(bdev)) {
		if (bdev->curr_speed != bdev->desired_speed)
			err = serdev_device_set_baudrate(bdev->serdev,
							 115200);
		else
			err = serdev_device_set_baudrate(bdev->serdev,
							 bdev->desired_speed);

		if (err < 0) {
			bt_dev_err(hdev, "Unable to set baudrate UART device %s",
				   dev_name(&bdev->serdev->dev));
			goto err_serdev_close;
		}

		serdev_device_set_flow_control(bdev->serdev, false);
	}

	bdev->stp_cursor = 2;
	bdev->stp_dlen = 0;

	dev = &bdev->serdev->dev;

	/* Enable the power domain and clock the device requires */
	pm_runtime_enable(dev);
	err = pm_runtime_resume_and_get(dev);
	if (err < 0)
		goto err_disable_rpm;

	err = clk_prepare_enable(bdev->clk);
	if (err < 0)
		goto err_put_rpm;

	return 0;

err_put_rpm:
	pm_runtime_put_sync(dev);
err_disable_rpm:
	pm_runtime_disable(dev);
err_serdev_close:
	serdev_device_close(bdev->serdev);
err_open:
	return err;
}

static int btmtkuart_close(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct device *dev = &bdev->serdev->dev;

	/* Shutdown the clock and power domain the device requires */
	clk_disable_unprepare(bdev->clk);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	serdev_device_close(bdev->serdev);

	return 0;
}

static int btmtkuart_flush(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);

	/* Flush any pending characters */
	serdev_device_write_flush(bdev->serdev);
	skb_queue_purge(&bdev->txq);

	cancel_work_sync(&bdev->tx_work);

	kfree_skb(bdev->rx_skb);
	bdev->rx_skb = NULL;

	bdev->stp_cursor = 2;
	bdev->stp_dlen = 0;

	return 0;
}

static int btmtkuart_func_query(struct hci_dev *hdev)
{
	struct btmtk_hci_wmt_params wmt_params;
	int status, err;
	u8 param = 0;

	/* Query whether the function is enabled */
	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
	wmt_params.flag = 4;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query function status (%d)", err);
		return err;
	}

	return status;
}

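/* Switch both sides to the desired baudrate: tell the controller via a WMT
 * HIF command at the current speed, reprogram the host UART, send a single
 * 0xff byte so the controller can lock onto the new rate, then verify the
 * link with a WMT TEST command.
 */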
static int btmtkuart_change_baudrate(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_params wmt_params;
	__le32 baudrate;
	u8 param;
	int err;

	/* Tell the device to enter the probe state, i.e. that the host is
	 * ready to change to a new baudrate.
	 */
	baudrate = cpu_to_le32(bdev->desired_speed);
	wmt_params.op = BTMTK_WMT_HIF;
	wmt_params.flag = 1;
	wmt_params.dlen = 4;
	wmt_params.data = &baudrate;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to set device baudrate (%d)", err);
		return err;
	}

	err = serdev_device_set_baudrate(bdev->serdev,
					 bdev->desired_speed);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to set up host baudrate (%d)",
			   err);
		return err;
	}

	serdev_device_set_flow_control(bdev->serdev, false);

	/* Send a dummy byte 0xff to activate the new baudrate */
	param = 0xff;
	err = serdev_device_write_buf(bdev->serdev, &param, sizeof(param));
	if (err < 0 || err < sizeof(param))
		return err;

	serdev_device_wait_until_sent(bdev->serdev, 0);

	/* Wait some time for the device to finish changing the baudrate */
	usleep_range(20000, 22000);

	/* Test the new baudrate */
	wmt_params.op = BTMTK_WMT_TEST;
	wmt_params.flag = 7;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to test new baudrate (%d)",
			   err);
		return err;
	}

	bdev->curr_speed = bdev->desired_speed;

	return 0;
}

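/* Bring the controller up: wake the MCU if needed, raise the baudrate on
 * standalone parts, download the firmware patch unless the semaphore query
 * says it is already in place, poll until the function-control query leaves
 * the "in progress" state, enable the Bluetooth function and finally apply
 * the low power (TCI sleep) parameters.
 */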
static int btmtkuart_setup(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_params wmt_params;
	ktime_t calltime, delta, rettime;
	struct btmtk_tci_sleep tci_sleep;
	unsigned long long duration;
	struct sk_buff *skb;
	int err, status;
	u8 param = 0x1;

	calltime = ktime_get();

	/* Certain devices require the MCUSYS to be woken up before we start
	 * any setup.
	 */
	if (test_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state)) {
		wmt_params.op = BTMTK_WMT_WAKEUP;
		wmt_params.flag = 3;
		wmt_params.dlen = 0;
		wmt_params.data = NULL;
		wmt_params.status = NULL;

		err = mtk_hci_wmt_sync(hdev, &wmt_params);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to wakeup the chip (%d)", err);
			return err;
		}

		clear_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
	}

	if (btmtkuart_is_standalone(bdev))
		btmtkuart_change_baudrate(hdev);

	/* Query whether the firmware is already downloaded */
	wmt_params.op = BTMTK_WMT_SEMAPHORE;
	wmt_params.flag = 1;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
		return err;
	}

	if (status == BTMTK_WMT_PATCH_DONE) {
		bt_dev_info(hdev, "Firmware already downloaded");
		goto ignore_setup_fw;
	}

	/* Set up the firmware the device definitely requires */
	err = btmtk_setup_firmware(hdev, bdev->data->fwname, mtk_hci_wmt_sync);
	if (err < 0)
		return err;

ignore_setup_fw:
	/* Query whether the device is already enabled */
	err = readx_poll_timeout(btmtkuart_func_query, hdev, status,
				 status < 0 || status != BTMTK_WMT_ON_PROGRESS,
				 2000, 5000000);
	/* -ETIMEDOUT happens */
	if (err < 0)
		return err;

	/* The other errors happen in btmtkuart_func_query */
	if (status < 0)
		return status;

	if (status == BTMTK_WMT_ON_DONE) {
		bt_dev_info(hdev, "function already on");
		goto ignore_func_on;
	}

	/* Enable Bluetooth protocol */
	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
	wmt_params.flag = 0;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

ignore_func_on:
	/* Apply the low power environment setup */
	tci_sleep.mode = 0x5;
	tci_sleep.duration = cpu_to_le16(0x640);
	tci_sleep.host_duration = cpu_to_le16(0x640);
	tci_sleep.host_wakeup_pin = 0;
	tci_sleep.time_compensation = 0;

	skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
		return err;
	}
	kfree_skb(skb);

	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long)ktime_to_ns(delta) >> 10;

	bt_dev_info(hdev, "Device setup in %llu usecs", duration);

	return 0;
}

static int btmtkuart_shutdown(struct hci_dev *hdev)
{
	struct btmtk_hci_wmt_params wmt_params;
	u8 param = 0x0;
	int err;

	/* Disable the device */
	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
	wmt_params.flag = 0;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

	return 0;
}

static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct mtk_stp_hdr *shdr;
	int err, dlen, type = 0;

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	/* Make sure there is enough room for the STP header and trailer */
	if (unlikely(skb_headroom(skb) < sizeof(*shdr)) ||
	    (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) {
		err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE,
				       GFP_ATOMIC);
		if (err < 0)
			return err;
	}

	/* Add the STP header */
	dlen = skb->len;
	shdr = skb_push(skb, sizeof(*shdr));
	shdr->prefix = 0x80;
	shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12));
	shdr->cs = 0;	/* MT7622 doesn't care about checksum value */

	/* Add the STP trailer */
	skb_put_zero(skb, MTK_STP_TLR_SIZE);

	skb_queue_tail(&bdev->txq, skb);

	btmtkuart_tx_wakeup(bdev);
	return 0;
}

static int btmtkuart_parse_dt(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
	struct device_node *node = serdev->dev.of_node;
	u32 speed = 921600;
	int err;

	if (btmtkuart_is_standalone(bdev)) {
		of_property_read_u32(node, "current-speed", &speed);

		bdev->desired_speed = speed;

		bdev->vcc = devm_regulator_get(&serdev->dev, "vcc");
		if (IS_ERR(bdev->vcc)) {
			err = PTR_ERR(bdev->vcc);
			return err;
		}

		bdev->osc = devm_clk_get_optional(&serdev->dev, "osc");
		if (IS_ERR(bdev->osc)) {
			err = PTR_ERR(bdev->osc);
			return err;
		}

		bdev->boot = devm_gpiod_get_optional(&serdev->dev, "boot",
						     GPIOD_OUT_LOW);
		if (IS_ERR(bdev->boot)) {
			err = PTR_ERR(bdev->boot);
			return err;
		}

		bdev->pinctrl = devm_pinctrl_get(&serdev->dev);
		if (IS_ERR(bdev->pinctrl)) {
			err = PTR_ERR(bdev->pinctrl);
			return err;
		}

		bdev->pins_boot = pinctrl_lookup_state(bdev->pinctrl,
						       "default");
		if (IS_ERR(bdev->pins_boot) && !bdev->boot) {
			err = PTR_ERR(bdev->pins_boot);
			dev_err(&serdev->dev,
				"Should assign RXD to LOW at boot stage\n");
			return err;
		}

		bdev->pins_runtime = pinctrl_lookup_state(bdev->pinctrl,
							  "runtime");
		if (IS_ERR(bdev->pins_runtime)) {
			err = PTR_ERR(bdev->pins_runtime);
			return err;
		}

		bdev->reset = devm_gpiod_get_optional(&serdev->dev, "reset",
						      GPIOD_OUT_LOW);
		if (IS_ERR(bdev->reset)) {
			err = PTR_ERR(bdev->reset);
			return err;
		}
	} else if (btmtkuart_is_builtin_soc(bdev)) {
		bdev->clk = devm_clk_get(&serdev->dev, "ref");
		if (IS_ERR(bdev->clk))
			return PTR_ERR(bdev->clk);
	}

	return 0;
}

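/* Probe: register the serdev client ops, parse the DT resources and register
 * the HCI device. Standalone parts are additionally powered up here: select
 * the boot pin state (or drive the boot GPIO), enable the vcc regulator,
 * pulse the optional reset line, wait for the chip to become ready and then
 * switch the pins to their runtime state.
 */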
static int btmtkuart_probe(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev;
	struct hci_dev *hdev;
	int err;

	bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	bdev->data = of_device_get_match_data(&serdev->dev);
	if (!bdev->data)
		return -ENODEV;

	bdev->serdev = serdev;
	serdev_device_set_drvdata(serdev, bdev);

	serdev_device_set_client_ops(serdev, &btmtkuart_client_ops);

	err = btmtkuart_parse_dt(serdev);
	if (err < 0)
		return err;

	INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
	skb_queue_head_init(&bdev->txq);

	/* Initialize and register HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		dev_err(&serdev->dev, "Can't allocate HCI device\n");
		return -ENOMEM;
	}

	bdev->hdev = hdev;

	hdev->bus = HCI_UART;
	hci_set_drvdata(hdev, bdev);

	hdev->open = btmtkuart_open;
	hdev->close = btmtkuart_close;
	hdev->flush = btmtkuart_flush;
	hdev->setup = btmtkuart_setup;
	hdev->shutdown = btmtkuart_shutdown;
	hdev->send = btmtkuart_send_frame;
	hdev->set_bdaddr = btmtk_set_bdaddr;
	SET_HCIDEV_DEV(hdev, &serdev->dev);

	hdev->manufacturer = 70;
	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);

	if (btmtkuart_is_standalone(bdev)) {
		err = clk_prepare_enable(bdev->osc);
		if (err < 0)
			goto err_hci_free_dev;

		if (bdev->boot) {
			gpiod_set_value_cansleep(bdev->boot, 1);
		} else {
			/* Switch to the specific pin state required for
			 * booting.
			 */
			pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
		}

		/* Power on */
		err = regulator_enable(bdev->vcc);
		if (err < 0)
			goto err_clk_disable_unprepare;

		/* Reset if the reset GPIO is available; otherwise the
		 * board-level design has to guarantee a proper reset.
		 */
		if (bdev->reset) {
			gpiod_set_value_cansleep(bdev->reset, 1);
			usleep_range(1000, 2000);
			gpiod_set_value_cansleep(bdev->reset, 0);
		}

		/* Wait some time until the device is ready, then switch to
		 * the pin mode the device requires for UART transfers.
		 */
		msleep(50);

		if (bdev->boot)
			devm_gpiod_put(&serdev->dev, bdev->boot);

		pinctrl_select_state(bdev->pinctrl, bdev->pins_runtime);

		/* A standalone device doesn't depend on a power domain on
		 * the SoC, so mark it as having no runtime PM callbacks.
		 */
		pm_runtime_no_callbacks(&serdev->dev);

		set_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
	}

	err = hci_register_dev(hdev);
	if (err < 0) {
		dev_err(&serdev->dev, "Can't register HCI device\n");
		goto err_regulator_disable;
	}

	return 0;

err_regulator_disable:
	if (btmtkuart_is_standalone(bdev))
		regulator_disable(bdev->vcc);
err_clk_disable_unprepare:
	if (btmtkuart_is_standalone(bdev))
		clk_disable_unprepare(bdev->osc);
err_hci_free_dev:
	hci_free_dev(hdev);

	return err;
}

static void btmtkuart_remove(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
	struct hci_dev *hdev = bdev->hdev;

	if (btmtkuart_is_standalone(bdev)) {
		regulator_disable(bdev->vcc);
		clk_disable_unprepare(bdev->osc);
	}

	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
}

static const struct btmtkuart_data mt7622_data __maybe_unused = {
	.fwname = FIRMWARE_MT7622,
};

static const struct btmtkuart_data mt7663_data __maybe_unused = {
	.flags = BTMTKUART_FLAG_STANDALONE_HW,
	.fwname = FIRMWARE_MT7663,
};

static const struct btmtkuart_data mt7668_data __maybe_unused = {
	.flags = BTMTKUART_FLAG_STANDALONE_HW,
	.fwname = FIRMWARE_MT7668,
};

#ifdef CONFIG_OF
static const struct of_device_id mtk_of_match_table[] = {
	{ .compatible = "mediatek,mt7622-bluetooth", .data = &mt7622_data},
	{ .compatible = "mediatek,mt7663u-bluetooth", .data = &mt7663_data},
	{ .compatible = "mediatek,mt7668u-bluetooth", .data = &mt7668_data},
	{ }
};
MODULE_DEVICE_TABLE(of, mtk_of_match_table);
#endif

static struct serdev_device_driver btmtkuart_driver = {
	.probe = btmtkuart_probe,
	.remove = btmtkuart_remove,
	.driver = {
		.name = "btmtkuart",
		.of_match_table = of_match_ptr(mtk_of_match_table),
	},
};

module_serdev_device_driver(btmtkuart_driver);

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");