// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 MediaTek Inc.

/*
 * Bluetooth support for MediaTek serial devices
 *
 * Author: Sean Wang <sean.wang@mediatek.com>
 *
 */

#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "h4_recv.h"

#define VERSION "0.1"

#define FIRMWARE_MT7622		"mediatek/mt7622pr2h.bin"

#define MTK_STP_TLR_SIZE	2

#define BTMTKUART_TX_STATE_ACTIVE	1
#define BTMTKUART_TX_STATE_WAKEUP	2
#define BTMTKUART_TX_WAIT_VND_EVT	3

enum {
	MTK_WMT_PATCH_DWNLD = 0x1,
	MTK_WMT_FUNC_CTRL = 0x6,
	MTK_WMT_RST = 0x7
};

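/* STP header that precedes every packet exchanged with the controller:
 * a fixed 0x80 prefix, a big-endian length whose lower 12 bits carry the
 * payload size, and a checksum byte the MT7622 does not verify.
 */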
struct mtk_stp_hdr {
	u8	prefix;
	__be16	dlen;
	u8	cs;
} __packed;

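/* Header for vendor WMT commands: direction, opcode, little-endian payload
 * length and a flag byte. struct mtk_hci_wmt_cmd bundles the header with up
 * to 256 bytes of payload.
 */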
struct mtk_wmt_hdr {
	u8	dir;
	u8	op;
	__le16	dlen;
	u8	flag;
} __packed;

struct mtk_hci_wmt_cmd {
	struct mtk_wmt_hdr hdr;
	u8 data[256];
} __packed;

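/* Per-device state: the registered HCI device, the underlying serdev, the
 * TX work and queue, the RX reassembly buffer and the STP parser state
 * (pad buffer, cursor and remaining payload length).
 */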
struct btmtkuart_dev {
	struct hci_dev *hdev;
	struct serdev_device *serdev;
	struct clk *clk;

	struct work_struct tx_work;
	unsigned long tx_state;
	struct sk_buff_head txq;

	struct sk_buff *rx_skb;

	u8	stp_pad[6];
	u8	stp_cursor;
	u16	stp_dlen;
};

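/* Send a vendor WMT command through HCI opcode 0xfc6f and wait until the
 * matching vendor event has arrived, which btmtkuart_recv_event() signals
 * by clearing BTMTKUART_TX_WAIT_VND_EVT.
 */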
static int mtk_hci_wmt_sync(struct hci_dev *hdev, u8 op, u8 flag, u16 plen,
			    const void *param)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct mtk_hci_wmt_cmd wc;
	struct mtk_wmt_hdr *hdr;
	u32 hlen;
	int err;

	hlen = sizeof(*hdr) + plen;
	if (hlen > 255)
		return -EINVAL;

	hdr = (struct mtk_wmt_hdr *)&wc;
	hdr->dir = 1;
	hdr->op = op;
	hdr->dlen = cpu_to_le16(plen + 1);
	hdr->flag = flag;
	memcpy(wc.data, param, plen);

	set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);

	err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
	if (err < 0) {
		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
		return err;
	}

	/* The vendor specific WMT commands are all answered by a vendor
	 * specific event and do not go through the usual HCI command flow
	 * control with Command Status or Command Complete.
	 *
	 * After sending the command, wait for the BTMTKUART_TX_WAIT_VND_EVT
	 * bit to be cleared. The driver-specific event receive routine
	 * clears that bit and thereby indicates completion of the WMT
	 * command.
	 */
	err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT,
				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
	if (err == -EINTR) {
		bt_dev_err(hdev, "Execution of wmt command interrupted");
		return err;
	}

	if (err) {
		bt_dev_err(hdev, "Execution of wmt command timed out");
		return -ETIMEDOUT;
	}

	return 0;
}

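/* Download the patch firmware for the MT7622: skip the 30-byte patch header
 * and push the image to the controller in fragments of at most 250 bytes
 * using MTK_WMT_PATCH_DWNLD.
 */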
static int mtk_setup_fw(struct hci_dev *hdev)
{
	const struct firmware *fw;
	const u8 *fw_ptr;
	size_t fw_size;
	int err, dlen;
	u8 flag;

	err = request_firmware(&fw, FIRMWARE_MT7622, &hdev->dev);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
		return err;
	}

	fw_ptr = fw->data;
	fw_size = fw->size;

	/* The firmware image starts with a 30-byte patch header that must
	 * be skipped.
	 */
	if (fw_size < 30) {
		err = -EINVAL;
		goto free_fw;
	}

	fw_size -= 30;
	fw_ptr += 30;
	flag = 1;

	while (fw_size > 0) {
		dlen = min_t(int, 250, fw_size);

		/* Tell the device the position of this fragment in the
		 * download sequence: 1 = first, 2 = continuation, 3 = final.
		 */
		if (fw_size - dlen <= 0)
			flag = 3;
		else if (fw_size < fw->size - 30)
			flag = 2;

		err = mtk_hci_wmt_sync(hdev, MTK_WMT_PATCH_DWNLD, flag, dlen,
				       fw_ptr);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
				   err);
			break;
		}

		fw_size -= dlen;
		fw_ptr += dlen;
	}

free_fw:
	release_firmware(fw);
	return err;
}

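/* Receive handler for HCI events: remap the MediaTek 0xe4 vendor event to
 * HCI_EV_VENDOR and complete any WMT command currently waiting in
 * mtk_hci_wmt_sync().
 */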
static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct hci_event_hdr *hdr = (void *)skb->data;
	u8 evt;
	int err;

	/* Fix up the vendor event id with 0xff for vendor specific instead
	 * of 0xe4 so that events sent via the monitoring socket can be
	 * parsed properly.
	 */
	if (hdr->evt == 0xe4)
		hdr->evt = HCI_EV_VENDOR;

	/* Record the event code before handing the skb off, because
	 * hci_recv_frame() takes ownership of it.
	 */
	evt = hdr->evt;

	err = hci_recv_frame(hdev, skb);

	if (evt == HCI_EV_VENDOR) {
		if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
				       &bdev->tx_state)) {
			/* Barrier to sync with other CPUs */
			smp_mb__after_atomic();
			wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT);
		}
	}

	return err;
}

static const struct h4_recv_pkt mtk_recv_pkts[] = {
	{ H4_RECV_ACL,      .recv = hci_recv_frame },
	{ H4_RECV_SCO,      .recv = hci_recv_frame },
	{ H4_RECV_EVENT,    .recv = btmtkuart_recv_event },
};

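/* TX worker: drain the TX queue through the serdev, re-queue any skb that
 * was only partially written and update the transmit statistics.
 */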
static void btmtkuart_tx_work(struct work_struct *work)
{
	struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
						   tx_work);
	struct serdev_device *serdev = bdev->serdev;
	struct hci_dev *hdev = bdev->hdev;

	while (1) {
		clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);

		while (1) {
			struct sk_buff *skb = skb_dequeue(&bdev->txq);
			int len;

			if (!skb)
				break;

			len = serdev_device_write_buf(serdev, skb->data,
						      skb->len);
			hdev->stat.byte_tx += len;

			skb_pull(skb, len);
			if (skb->len > 0) {
				skb_queue_head(&bdev->txq, skb);
				break;
			}

			switch (hci_skb_pkt_type(skb)) {
			case HCI_COMMAND_PKT:
				hdev->stat.cmd_tx++;
				break;
			case HCI_ACLDATA_PKT:
				hdev->stat.acl_tx++;
				break;
			case HCI_SCODATA_PKT:
				hdev->stat.sco_tx++;
				break;
			}

			kfree_skb(skb);
		}

		if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state))
			break;
	}

	clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state);
}

static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev)
{
	if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state))
		set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);

	schedule_work(&bdev->tx_work);
}

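/* Strip STP framing from the raw serdev byte stream: collect the STP header
 * (and any preceding trailer bytes of the previous packet) in stp_pad, parse
 * the payload length, and return a pointer to the H:4 payload with *sz_h4
 * set to how many payload bytes this chunk contains (NULL if none).
 */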
static const unsigned char *
mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
	      int *sz_h4)
{
	struct mtk_stp_hdr *shdr;

	/* The cursor is reset once all data in the current STP packet has
	 * been consumed.
	 */
	if (!bdev->stp_dlen && bdev->stp_cursor >= 6)
		bdev->stp_cursor = 0;

	/* Fill the pad buffer until the complete STP header is available */
	while (bdev->stp_cursor < 6 && count > 0) {
		bdev->stp_pad[bdev->stp_cursor] = *data;
		bdev->stp_cursor++;
		data++;
		count--;
	}

	/* Parse the STP header and sanity check it */
	if (!bdev->stp_dlen && bdev->stp_cursor >= 6) {
		shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2];
		bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff;

		/* Resync STP when unexpected data is being read */
		if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
			bt_dev_err(bdev->hdev, "stp format unexpect (%d, %d)",
				   shdr->prefix, bdev->stp_dlen);
			bdev->stp_cursor = 2;
			bdev->stp_dlen = 0;
		}
	}

	/* Quit immediately when there is no data left for H4 to process */
	if (count <= 0)
		return NULL;

	/* Translate into the amount of data H4 can handle so far */
	*sz_h4 = min_t(int, count, bdev->stp_dlen);

	/* Update the remaining size of the STP packet */
	bdev->stp_dlen -= *sz_h4;

	/* Data now points to the STP payload, which H4 can handle */
	return data;
}

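/* Feed received bytes through the STP splitter and hand the resulting H:4
 * payload to h4_recv_buf() for packet reassembly.
 */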
static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	const unsigned char *p_left = data, *p_h4;
	int sz_left = count, sz_h4, adv;
	int err;

	while (sz_left > 0) {
		/*  The serial data received from the MT7622 BT controller
		 *  is always wrapped in an STP header and trailer.
		 *
		 *  A full STP packet looks like
		 *   ------------------------------------
		 *  | STP header  |  H:4   | STP trailer |
		 *   ------------------------------------
		 *  but it is not guaranteed to contain a complete H:4 packet;
		 *  a single H:4 packet may be fragmented across several STP
		 *  packets. The length recorded in an STP header therefore
		 *  does not describe a full H:4 frame, only the amount of
		 *  data the H:4 engine can consume at this point.
		 */

		p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4);
		if (!p_h4)
			break;

		adv = p_h4 - p_left;
		sz_left -= adv;
		p_left += adv;

		bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
					   sz_h4, mtk_recv_pkts,
					   ARRAY_SIZE(mtk_recv_pkts));
		if (IS_ERR(bdev->rx_skb)) {
			err = PTR_ERR(bdev->rx_skb);
			bt_dev_err(bdev->hdev,
				   "Frame reassembly failed (%d)", err);
			bdev->rx_skb = NULL;
			return err;
		}

		sz_left -= sz_h4;
		p_left += sz_h4;
	}

	return 0;
}

static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
				 size_t count)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
	int err;

	err = btmtkuart_recv(bdev->hdev, data, count);
	if (err < 0)
		return err;

	bdev->hdev->stat.byte_rx += count;

	return count;
}

static void btmtkuart_write_wakeup(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);

	btmtkuart_tx_wakeup(bdev);
}

static const struct serdev_device_ops btmtkuart_client_ops = {
	.receive_buf = btmtkuart_receive_buf,
	.write_wakeup = btmtkuart_write_wakeup,
};

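/* hdev->open handler: open the serdev, reset the STP parser state and power
 * up the power domain and clock the controller depends on.
 */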
static int btmtkuart_open(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct device *dev;
	int err;

	err = serdev_device_open(bdev->serdev);
	if (err) {
		bt_dev_err(hdev, "Unable to open UART device %s",
			   dev_name(&bdev->serdev->dev));
		goto err_open;
	}

	bdev->stp_cursor = 2;
	bdev->stp_dlen = 0;

	dev = &bdev->serdev->dev;

	/* Enable the power domain and clock the device requires */
	pm_runtime_enable(dev);
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		pm_runtime_put_noidle(dev);
		goto err_disable_rpm;
	}

	err = clk_prepare_enable(bdev->clk);
	if (err < 0)
		goto err_put_rpm;

	return 0;

err_put_rpm:
	pm_runtime_put_sync(dev);
err_disable_rpm:
	pm_runtime_disable(dev);
err_open:
	return err;
}

static int btmtkuart_close(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct device *dev = &bdev->serdev->dev;

	/* Shut down the clock and power domain the device requires */
	clk_disable_unprepare(bdev->clk);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	serdev_device_close(bdev->serdev);

	return 0;
}

static int btmtkuart_flush(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);

	/* Flush any pending characters */
	serdev_device_write_flush(bdev->serdev);
	skb_queue_purge(&bdev->txq);

	cancel_work_sync(&bdev->tx_work);

	kfree_skb(bdev->rx_skb);
	bdev->rx_skb = NULL;

	bdev->stp_cursor = 2;
	bdev->stp_dlen = 0;

	return 0;
}

static int btmtkuart_setup(struct hci_dev *hdev)
{
	u8 param = 0x1;
	int err = 0;

	/* Set up the firmware the device requires */
	err = mtk_setup_fw(hdev);
	if (err < 0)
		return err;

	/* Activate the function the firmware provides */
	err = mtk_hci_wmt_sync(hdev, MTK_WMT_RST, 0x4, 0, NULL);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
		return err;
	}

	/* Enable Bluetooth protocol */
	err = mtk_hci_wmt_sync(hdev, MTK_WMT_FUNC_CTRL, 0x0, sizeof(param),
			       &param);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

	return 0;
}

static int btmtkuart_shutdown(struct hci_dev *hdev)
{
	u8 param = 0x0;
	int err;

	/* Disable the device */
	err = mtk_hci_wmt_sync(hdev, MTK_WMT_FUNC_CTRL, 0x0, sizeof(param),
			       &param);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

	return 0;
}

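/* hdev->send handler: prepend the H:4 packet type, wrap the packet in an
 * STP header and trailer, and queue it for the TX worker.
 */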
static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct mtk_stp_hdr *shdr;
	int err, dlen, type = 0;

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	/* Make sure there is enough room for the STP header and trailer */
	if (unlikely(skb_headroom(skb) < sizeof(*shdr)) ||
	    (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) {
		err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE,
				       GFP_ATOMIC);
		if (err < 0)
			return err;
	}

	/* Add the STP header */
	dlen = skb->len;
	shdr = skb_push(skb, sizeof(*shdr));
	shdr->prefix = 0x80;
	shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12));
	shdr->cs = 0;		/* MT7622 doesn't care about checksum value */

	/* Add the STP trailer */
	skb_put_zero(skb, MTK_STP_TLR_SIZE);

	skb_queue_tail(&bdev->txq, skb);

	btmtkuart_tx_wakeup(bdev);
	return 0;
}

static int btmtkuart_probe(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev;
	struct hci_dev *hdev;

	bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	bdev->clk = devm_clk_get(&serdev->dev, "ref");
	if (IS_ERR(bdev->clk))
		return PTR_ERR(bdev->clk);

	bdev->serdev = serdev;
	serdev_device_set_drvdata(serdev, bdev);

	serdev_device_set_client_ops(serdev, &btmtkuart_client_ops);

	INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
	skb_queue_head_init(&bdev->txq);

	/* Initialize and register HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		dev_err(&serdev->dev, "Can't allocate HCI device\n");
		return -ENOMEM;
	}

	bdev->hdev = hdev;

	hdev->bus = HCI_UART;
	hci_set_drvdata(hdev, bdev);

	hdev->open     = btmtkuart_open;
	hdev->close    = btmtkuart_close;
	hdev->flush    = btmtkuart_flush;
	hdev->setup    = btmtkuart_setup;
	hdev->shutdown = btmtkuart_shutdown;
	hdev->send     = btmtkuart_send_frame;
	SET_HCIDEV_DEV(hdev, &serdev->dev);

	hdev->manufacturer = 70;
	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);

	if (hci_register_dev(hdev) < 0) {
		dev_err(&serdev->dev, "Can't register HCI device\n");
		hci_free_dev(hdev);
		return -ENODEV;
	}

	return 0;
}

static void btmtkuart_remove(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
	struct hci_dev *hdev = bdev->hdev;

	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
}

#ifdef CONFIG_OF
static const struct of_device_id mtk_of_match_table[] = {
	{ .compatible = "mediatek,mt7622-bluetooth"},
	{ }
};
MODULE_DEVICE_TABLE(of, mtk_of_match_table);
#endif

static struct serdev_device_driver btmtkuart_driver = {
	.probe = btmtkuart_probe,
	.remove = btmtkuart_remove,
	.driver = {
		.name = "btmtkuart",
		.of_match_table = of_match_ptr(mtk_of_match_table),
	},
};

module_serdev_device_driver(btmtkuart_driver);

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_MT7622);