// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 MediaTek Inc.

/*
 * Bluetooth support for MediaTek SDIO devices
 *
 * This file is written based on btsdio.c and btmtkuart.c.
 *
 * Author: Sean Wang <sean.wang@mediatek.com>
 *
 */

#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/firmware.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "h4_recv.h"

#define VERSION "0.1"

#define FIRMWARE_MT7663		"mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668		"mediatek/mt7668pr2h.bin"

#define MTKBTSDIO_AUTOSUSPEND_DELAY	8000

static bool enable_autosuspend;

struct btmtksdio_data {
	const char *fwname;
};

static const struct btmtksdio_data mt7663_data = {
	.fwname = FIRMWARE_MT7663,
};

static const struct btmtksdio_data mt7668_data = {
	.fwname = FIRMWARE_MT7668,
};

static const struct sdio_device_id btmtksdio_table[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7663),
	 .driver_data = (kernel_ulong_t)&mt7663_data },
	{SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7668),
	 .driver_data = (kernel_ulong_t)&mt7668_data },
	{ }	/* Terminating entry */
};
MODULE_DEVICE_TABLE(sdio, btmtksdio_table);

#define MTK_REG_CHLPCR		0x4	/* W1S */
#define C_INT_EN_SET		BIT(0)
#define C_INT_EN_CLR		BIT(1)
#define C_FW_OWN_REQ_SET	BIT(8)  /* For write */
#define C_COM_DRV_OWN		BIT(8)  /* For read */
#define C_FW_OWN_REQ_CLR	BIT(9)

#define MTK_REG_CSDIOCSR	0x8
#define SDIO_RE_INIT_EN		BIT(0)
#define SDIO_INT_CTL		BIT(2)

#define MTK_REG_CHCR		0xc
#define C_INT_CLR_CTRL		BIT(1)

/* CHISR has the same bit field definition as CHIER */
#define MTK_REG_CHISR		0x10
#define MTK_REG_CHIER		0x14
#define FW_OWN_BACK_INT		BIT(0)
#define RX_DONE_INT		BIT(1)
#define TX_EMPTY		BIT(2)
#define TX_FIFO_OVERFLOW	BIT(8)
#define RX_PKT_LEN		GENMASK(31, 16)

#define MTK_REG_CTDR		0x18

#define MTK_REG_CRDR		0x1c

#define MTK_SDIO_BLOCK_SIZE	256

#define BTMTKSDIO_TX_WAIT_VND_EVT	1

enum {
	MTK_WMT_PATCH_DWNLD = 0x1,
	MTK_WMT_TEST = 0x2,
	MTK_WMT_WAKEUP = 0x3,
	MTK_WMT_HIF = 0x4,
	MTK_WMT_FUNC_CTRL = 0x6,
	MTK_WMT_RST = 0x7,
	MTK_WMT_SEMAPHORE = 0x17,
};

enum {
	BTMTK_WMT_INVALID,
	BTMTK_WMT_PATCH_UNDONE,
	BTMTK_WMT_PATCH_DONE,
	BTMTK_WMT_ON_UNDONE,
	BTMTK_WMT_ON_DONE,
	BTMTK_WMT_ON_PROGRESS,
};

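/* MediaTek SDIO packet header prepended to every Tx/Rx packet.
 * @len: total packet length including this header (little endian)
 * @reserved: always written as zero on Tx
 * @bt_type: HCI packet type as used by the H:4 protocol
 */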
struct mtkbtsdio_hdr {
	__le16	len;
	__le16	reserved;
	u8	bt_type;
} __packed;

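/* Header shared by WMT (vendor) commands and events.
 * @dir: direction, set to 1 for host-to-device commands
 * @op: WMT opcode, one of the MTK_WMT_* values
 * @dlen: payload length plus one (presumably accounting for the flag byte)
 * @flag: opcode-specific flag byte
 */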
struct mtk_wmt_hdr {
	u8	dir;
	u8	op;
	__le16	dlen;
	u8	flag;
} __packed;

struct mtk_hci_wmt_cmd {
	struct mtk_wmt_hdr hdr;
	u8 data[256];
} __packed;

struct btmtk_hci_wmt_evt {
	struct hci_event_hdr hhdr;
	struct mtk_wmt_hdr whdr;
} __packed;

struct btmtk_hci_wmt_evt_funcc {
	struct btmtk_hci_wmt_evt hwhdr;
	__be16 status;
} __packed;

struct btmtk_tci_sleep {
	u8 mode;
	__le16 duration;
	__le16 host_duration;
	u8 host_wakeup_pin;
	u8 time_compensation;
} __packed;

struct btmtk_hci_wmt_params {
	u8 op;
	u8 flag;
	u16 dlen;
	const void *data;
	u32 *status;
};

struct btmtksdio_dev {
	struct hci_dev *hdev;
	struct sdio_func *func;
	struct device *dev;

	struct work_struct tx_work;
	unsigned long tx_state;
	struct sk_buff_head txq;

	struct sk_buff *evt_skb;

	const struct btmtksdio_data *data;
};

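/* Send a WMT command as HCI vendor command 0xfc6f and wait for the matching
 * vendor event. The event is stashed in bdev->evt_skb by the Rx path
 * (btmtksdio_recv_event) and decoded here into a BTMTK_WMT_* status.
 */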
static int mtk_hci_wmt_sync(struct hci_dev *hdev,
			    struct btmtk_hci_wmt_params *wmt_params)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
	u32 hlen, status = BTMTK_WMT_INVALID;
	struct btmtk_hci_wmt_evt *wmt_evt;
	struct mtk_hci_wmt_cmd wc;
	struct mtk_wmt_hdr *hdr;
	int err;

	hlen = sizeof(*hdr) + wmt_params->dlen;
	if (hlen > 255)
		return -EINVAL;

	hdr = (struct mtk_wmt_hdr *)&wc;
	hdr->dir = 1;
	hdr->op = wmt_params->op;
	hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
	hdr->flag = wmt_params->flag;
	memcpy(wc.data, wmt_params->data, wmt_params->dlen);

	set_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);

	err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
	if (err < 0) {
		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
		return err;
	}

	/* The vendor specific WMT commands are all answered by a vendor
	 * specific event and will not have the Command Status or Command
	 * Complete as with usual HCI command flow control.
	 *
	 * After sending the command, wait for BTMTKSDIO_TX_WAIT_VND_EVT
	 * state to be cleared. The driver specific event receive routine
	 * will clear that state and with that indicate completion of the
	 * WMT command.
	 */
	err = wait_on_bit_timeout(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT,
				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
	if (err == -EINTR) {
		bt_dev_err(hdev, "Execution of wmt command interrupted");
		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
		return err;
	}

	if (err) {
		bt_dev_err(hdev, "Execution of wmt command timed out");
		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
		return -ETIMEDOUT;
	}

	/* Parse and handle the return WMT event */
	wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
	if (wmt_evt->whdr.op != hdr->op) {
		bt_dev_err(hdev, "Wrong op received %d expected %d",
			   wmt_evt->whdr.op, hdr->op);
		err = -EIO;
		goto err_free_skb;
	}

	switch (wmt_evt->whdr.op) {
	case MTK_WMT_SEMAPHORE:
		if (wmt_evt->whdr.flag == 2)
			status = BTMTK_WMT_PATCH_UNDONE;
		else
			status = BTMTK_WMT_PATCH_DONE;
		break;
	case MTK_WMT_FUNC_CTRL:
		wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
		if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
			status = BTMTK_WMT_ON_DONE;
		else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
			status = BTMTK_WMT_ON_PROGRESS;
		else
			status = BTMTK_WMT_ON_UNDONE;
		break;
	}

	if (wmt_params->status)
		*wmt_params->status = status;

err_free_skb:
	kfree_skb(bdev->evt_skb);
	bdev->evt_skb = NULL;

	return err;
}

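/* Send one packet to the controller: prepend the MediaTek SDIO header and
 * write the buffer to the Tx data register, padded up to the block size.
 * Called with the SDIO host claimed.
 */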
static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev,
			       struct sk_buff *skb)
{
	struct mtkbtsdio_hdr *sdio_hdr;
	int err;

	/* Make sure that there is enough room for the SDIO header */
	if (unlikely(skb_headroom(skb) < sizeof(*sdio_hdr))) {
		err = pskb_expand_head(skb, sizeof(*sdio_hdr), 0,
				       GFP_ATOMIC);
		if (err < 0)
			return err;
	}

	/* Prepend MediaTek SDIO Specific Header */
	skb_push(skb, sizeof(*sdio_hdr));

	sdio_hdr = (void *)skb->data;
	sdio_hdr->len = cpu_to_le16(skb->len);
	sdio_hdr->reserved = cpu_to_le16(0);
	sdio_hdr->bt_type = hci_skb_pkt_type(skb);

	err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data,
			   round_up(skb->len, MTK_SDIO_BLOCK_SIZE));
	if (err < 0)
		goto err_skb_pull;

	bdev->hdev->stat.byte_tx += skb->len;

	kfree_skb(skb);

	return 0;

err_skb_pull:
	skb_pull(skb, sizeof(*sdio_hdr));

	return err;
}

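/* Read CHLPCR to check which side (driver or firmware) currently owns the
 * device; used as the polling helper for readx_poll_timeout() below.
 */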
static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev)
{
	return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL);
}

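/* Tx worker: drain the Tx queue while holding a PM runtime reference and the
 * SDIO host. On a write error the packet is requeued and the loop stops.
 */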
static void btmtksdio_tx_work(struct work_struct *work)
{
	struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
						  tx_work);
	struct sk_buff *skb;
	int err;

	pm_runtime_get_sync(bdev->dev);

	sdio_claim_host(bdev->func);

	while ((skb = skb_dequeue(&bdev->txq))) {
		err = btmtksdio_tx_packet(bdev, skb);
		if (err < 0) {
			bdev->hdev->stat.err_tx++;
			skb_queue_head(&bdev->txq, skb);
			break;
		}
	}

	sdio_release_host(bdev->func);

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
}

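/* Event receive path: clone the skb for a pending WMT request if one is
 * waiting, hand the event to the HCI core, and wake up the WMT waiter once
 * the vendor event has arrived.
 */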
static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	struct hci_event_hdr *hdr = (void *)skb->data;
	int err;

	/* Fix up the vendor event id with 0xff for vendor specific instead
	 * of 0xe4 so that events sent via the monitoring socket can be
	 * parsed properly.
	 */
	if (hdr->evt == 0xe4)
		hdr->evt = HCI_EV_VENDOR;

	/* When someone is waiting for the WMT event, the skb is cloned here
	 * and the event is processed from the clone later on.
	 */
	if (test_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state)) {
		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
		if (!bdev->evt_skb) {
			err = -ENOMEM;
			goto err_out;
		}
	}

	err = hci_recv_frame(hdev, skb);
	if (err < 0)
		goto err_free_skb;

	if (hdr->evt == HCI_EV_VENDOR) {
		if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT,
				       &bdev->tx_state)) {
			/* Barrier to sync with other CPUs */
			smp_mb__after_atomic();
			wake_up_bit(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT);
		}
	}

	return 0;

err_free_skb:
	kfree_skb(bdev->evt_skb);
	bdev->evt_skb = NULL;

err_out:
	return err;
}

static const struct h4_recv_pkt mtk_recv_pkts[] = {
	{ H4_RECV_ACL,      .recv = hci_recv_frame },
	{ H4_RECV_SCO,      .recv = hci_recv_frame },
	{ H4_RECV_EVENT,    .recv = btmtksdio_recv_event },
};

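/* Read one packet of rx_size bytes from the Rx data register, validate the
 * MediaTek SDIO header, strip it along with any tail padding, and pass the
 * remaining H:4 packet to the matching receive handler.
 */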
static int btmtksdio_rx_packet(struct btmtksdio_dev *bdev, u16 rx_size)
{
	const struct h4_recv_pkt *pkts = mtk_recv_pkts;
	int pkts_count = ARRAY_SIZE(mtk_recv_pkts);
	struct mtkbtsdio_hdr *sdio_hdr;
	int err, i, pad_size;
	struct sk_buff *skb;
	u16 dlen;

	if (rx_size < sizeof(*sdio_hdr))
		return -EILSEQ;

	/* An SDIO packet contains exactly one Bluetooth packet */
	skb = bt_skb_alloc(rx_size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, rx_size);

	err = sdio_readsb(bdev->func, skb->data, MTK_REG_CRDR, rx_size);
	if (err < 0)
		goto err_kfree_skb;

	sdio_hdr = (void *)skb->data;

	/* We default the error to -EILSEQ simply to make the error path
	 * cleaner.
	 */
	err = -EILSEQ;

	if (rx_size != le16_to_cpu(sdio_hdr->len)) {
		bt_dev_err(bdev->hdev, "Rx size in sdio header is mismatched");
		goto err_kfree_skb;
	}

	hci_skb_pkt_type(skb) = sdio_hdr->bt_type;

	/* Remove MediaTek SDIO header */
	skb_pull(skb, sizeof(*sdio_hdr));

	/* We have to dig into the packet to get the payload size so that we
	 * know how many padding bytes are at the tail; those padding bytes
	 * must be removed before the packet is handed to the core layer.
	 */
	for (i = 0; i < pkts_count; i++) {
		if (sdio_hdr->bt_type == (&pkts[i])->type)
			break;
	}

	if (i >= pkts_count) {
		bt_dev_err(bdev->hdev, "Invalid bt type 0x%02x",
			   sdio_hdr->bt_type);
		goto err_kfree_skb;
	}

	/* Remaining bytes cannot hold a header */
	if (skb->len < (&pkts[i])->hlen) {
		bt_dev_err(bdev->hdev, "The size of bt header is mismatched");
		goto err_kfree_skb;
	}

	switch ((&pkts[i])->lsize) {
	case 1:
		dlen = skb->data[(&pkts[i])->loff];
		break;
	case 2:
		dlen = get_unaligned_le16(skb->data +
					  (&pkts[i])->loff);
		break;
	default:
		goto err_kfree_skb;
	}

	pad_size = skb->len - (&pkts[i])->hlen - dlen;

	/* Remaining bytes cannot hold a payload */
	if (pad_size < 0) {
		bt_dev_err(bdev->hdev, "The size of bt payload is mismatched");
		goto err_kfree_skb;
	}

	/* Remove padding bytes */
	skb_trim(skb, skb->len - pad_size);

	/* Complete frame */
	(&pkts[i])->recv(bdev->hdev, skb);

	bdev->hdev->stat.byte_rx += rx_size;

	return 0;

err_kfree_skb:
	kfree_skb(skb);

	return err;
}

static void btmtksdio_interrupt(struct sdio_func *func)
{
	struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
	u32 int_status;
	u16 rx_size;

	/* The host must get ownership from the device before accessing any
	 * register. However, if the SDIO host is not released here, a
	 * potential deadlock can happen as a circular wait between the SDIO
	 * IRQ work and the PM runtime work. So we have to explicitly release
	 * the SDIO host here and claim it again once the PM runtime work is
	 * done.
	 */
	sdio_release_host(bdev->func);

	pm_runtime_get_sync(bdev->dev);

	sdio_claim_host(bdev->func);

	/* Disable interrupt */
	sdio_writel(func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);

	int_status = sdio_readl(func, MTK_REG_CHISR, NULL);

	/* Ack the interrupt as soon as possible, before any operation on the
	 * hardware.
	 *
	 * Note that we don't ack any status during operations to avoid a race
	 * condition between the host and the device. For example, it would be
	 * possible to mistakenly ack RX_DONE for the next packet and then
	 * cause interrupts not to be raised again while there is still
	 * pending data in the hardware FIFO.
	 */
	sdio_writel(func, int_status, MTK_REG_CHISR, NULL);

	if (unlikely(!int_status))
		bt_dev_err(bdev->hdev, "CHISR is 0");

	if (int_status & FW_OWN_BACK_INT)
		bt_dev_dbg(bdev->hdev, "Get fw own back");

	if (int_status & TX_EMPTY)
		schedule_work(&bdev->tx_work);
	else if (unlikely(int_status & TX_FIFO_OVERFLOW))
		bt_dev_warn(bdev->hdev, "Tx fifo overflow");

	if (int_status & RX_DONE_INT) {
		rx_size = (int_status & RX_PKT_LEN) >> 16;

		if (btmtksdio_rx_packet(bdev, rx_size) < 0)
			bdev->hdev->stat.err_rx++;
	}

	/* Enable interrupt */
	sdio_writel(func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
}

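/* Bring the SDIO function up: enable the function, take ownership from the
 * firmware, register the IRQ handler, set the block size and unmask the
 * interrupt sources the driver cares about.
 */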
static int btmtksdio_open(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	int err;
	u32 status;

	sdio_claim_host(bdev->func);

	err = sdio_enable_func(bdev->func);
	if (err < 0)
		goto err_release_host;

	/* Get ownership from the device */
	sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto err_disable_func;

	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 status & C_COM_DRV_OWN, 2000, 1000000);
	if (err < 0) {
		bt_dev_err(bdev->hdev, "Cannot get ownership from device");
		goto err_disable_func;
	}

	/* Disable interrupt & mask out all interrupt sources */
	sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto err_disable_func;

	sdio_writel(bdev->func, 0, MTK_REG_CHIER, &err);
	if (err < 0)
		goto err_disable_func;

	err = sdio_claim_irq(bdev->func, btmtksdio_interrupt);
	if (err < 0)
		goto err_disable_func;

	err = sdio_set_block_size(bdev->func, MTK_SDIO_BLOCK_SIZE);
	if (err < 0)
		goto err_release_irq;

	/* SDIO CMD 5 allows the SDIO device to go back to the idle state, and
	 * synchronous interrupt is supported in SDIO 4-bit mode
	 */
	sdio_writel(bdev->func, SDIO_INT_CTL | SDIO_RE_INIT_EN,
		    MTK_REG_CSDIOCSR, &err);
	if (err < 0)
		goto err_release_irq;

	/* Setup write-1-clear for CHISR register */
	sdio_writel(bdev->func, C_INT_CLR_CTRL, MTK_REG_CHCR, &err);
	if (err < 0)
		goto err_release_irq;

	/* Setup interrupt sources */
	sdio_writel(bdev->func, RX_DONE_INT | TX_EMPTY | TX_FIFO_OVERFLOW,
		    MTK_REG_CHIER, &err);
	if (err < 0)
		goto err_release_irq;

	/* Enable interrupt */
	sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto err_release_irq;

	sdio_release_host(bdev->func);

	return 0;

err_release_irq:
	sdio_release_irq(bdev->func);

err_disable_func:
	sdio_disable_func(bdev->func);

err_release_host:
	sdio_release_host(bdev->func);

	return err;
}

static int btmtksdio_close(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	u32 status;
	int err;

	sdio_claim_host(bdev->func);

	/* Disable interrupt */
	sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);

	sdio_release_irq(bdev->func);

	/* Return ownership to the device */
	sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, NULL);

	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 !(status & C_COM_DRV_OWN), 2000, 1000000);
	if (err < 0)
		bt_dev_err(bdev->hdev, "Cannot return ownership to device");

	sdio_disable_func(bdev->func);

	sdio_release_host(bdev->func);

	return 0;
}

static int btmtksdio_flush(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);

	skb_queue_purge(&bdev->txq);

	cancel_work_sync(&bdev->tx_work);

	return 0;
}

static int btmtksdio_func_query(struct hci_dev *hdev)
{
	struct btmtk_hci_wmt_params wmt_params;
	int status, err;
	u8 param = 0;

	/* Query whether the function is enabled */
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 4;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query function status (%d)", err);
		return err;
	}

	return status;
}

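/* Download the patch firmware over WMT. The 30-byte patch header is skipped
 * and the body is sent in chunks of up to 250 bytes; the WMT flag appears to
 * mark the position in the sequence (1 = first, 2 = continuation, 3 = last).
 * A WMT reset then activates the downloaded patch.
 */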
static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
{
	struct btmtk_hci_wmt_params wmt_params;
	const struct firmware *fw;
	const u8 *fw_ptr;
	size_t fw_size;
	int err, dlen;
	u8 flag, param;

	err = request_firmware(&fw, fwname, &hdev->dev);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
		return err;
	}

	/* Power on data RAM the firmware relies on. */
	param = 1;
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 3;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
		return err;
	}

	fw_ptr = fw->data;
	fw_size = fw->size;

	/* The size of the patch header is 30 bytes, which should be skipped */
	if (fw_size < 30) {
		err = -EINVAL;
		goto free_fw;
	}

	fw_size -= 30;
	fw_ptr += 30;
	flag = 1;

	wmt_params.op = MTK_WMT_PATCH_DWNLD;
	wmt_params.status = NULL;

	while (fw_size > 0) {
		dlen = min_t(int, 250, fw_size);

		/* Tell device the position in sequence */
		if (fw_size - dlen <= 0)
			flag = 3;
		else if (fw_size < fw->size - 30)
			flag = 2;

		wmt_params.flag = flag;
		wmt_params.dlen = dlen;
		wmt_params.data = fw_ptr;

		err = mtk_hci_wmt_sync(hdev, &wmt_params);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
				   err);
			goto free_fw;
		}

		fw_size -= dlen;
		fw_ptr += dlen;
	}

	wmt_params.op = MTK_WMT_RST;
	wmt_params.flag = 4;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = NULL;

	/* Activate the function the firmware has provided */
	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
		goto free_fw;
	}

	/* Wait a few moments for firmware activation to complete */
	usleep_range(10000, 12000);

free_fw:
	release_firmware(fw);
	return err;
}

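/* One-time setup: check via the WMT semaphore whether a patch is already
 * present, download the firmware if not, enable the Bluetooth function,
 * apply the low power (TCI sleep) settings and set up runtime PM.
 */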
static int btmtksdio_setup(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_params wmt_params;
	ktime_t calltime, delta, rettime;
	struct btmtk_tci_sleep tci_sleep;
	unsigned long long duration;
	struct sk_buff *skb;
	int err, status;
	u8 param = 0x1;

	calltime = ktime_get();

	/* Query whether the firmware has already been downloaded */
	wmt_params.op = MTK_WMT_SEMAPHORE;
	wmt_params.flag = 1;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
		return err;
	}

	if (status == BTMTK_WMT_PATCH_DONE) {
		bt_dev_info(hdev, "Firmware already downloaded");
		goto ignore_setup_fw;
	}

	/* Setup a firmware which the device definitely requires */
	err = mtk_setup_firmware(hdev, bdev->data->fwname);
	if (err < 0)
		return err;

ignore_setup_fw:
	/* Query whether the device is already enabled */
	err = readx_poll_timeout(btmtksdio_func_query, hdev, status,
				 status < 0 || status != BTMTK_WMT_ON_PROGRESS,
				 2000, 5000000);
	/* -ETIMEDOUT happens */
	if (err < 0)
		return err;

	/* The other errors happen in btmtksdio_func_query */
	if (status < 0)
		return status;

	if (status == BTMTK_WMT_ON_DONE) {
		bt_dev_info(hdev, "function already on");
		goto ignore_func_on;
	}

	/* Enable Bluetooth protocol */
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 0;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

ignore_func_on:
	/* Apply the low power environment setup */
	tci_sleep.mode = 0x5;
	tci_sleep.duration = cpu_to_le16(0x640);
	tci_sleep.host_duration = cpu_to_le16(0x640);
	tci_sleep.host_wakeup_pin = 0;
	tci_sleep.time_compensation = 0;

	skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
		return err;
	}
	kfree_skb(skb);

	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long)ktime_to_ns(delta) >> 10;

	pm_runtime_set_autosuspend_delay(bdev->dev,
					 MTKBTSDIO_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(bdev->dev);

	err = pm_runtime_set_active(bdev->dev);
	if (err < 0)
		return err;

	/* Runtime auto-suspend is forbidden by default; it can be allowed via
	 * the enable_autosuspend module parameter or the PM runtime entry
	 * under sysfs.
	 */
	pm_runtime_forbid(bdev->dev);
	pm_runtime_enable(bdev->dev);

	if (enable_autosuspend)
		pm_runtime_allow(bdev->dev);

	bt_dev_info(hdev, "Device setup in %llu usecs", duration);

	return 0;
}

static int btmtksdio_shutdown(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_params wmt_params;
	u8 param = 0x0;
	int err;

	/* Get back the state to be consistent with the state
	 * in btmtksdio_setup.
	 */
	pm_runtime_get_sync(bdev->dev);

	/* Disable the device */
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 0;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

	pm_runtime_put_noidle(bdev->dev);
	pm_runtime_disable(bdev->dev);

	return 0;
}

static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		hdev->stat.cmd_tx++;
		break;

	case HCI_ACLDATA_PKT:
		hdev->stat.acl_tx++;
		break;

	case HCI_SCODATA_PKT:
		hdev->stat.sco_tx++;
		break;

	default:
		return -EILSEQ;
	}

	skb_queue_tail(&bdev->txq, skb);

	schedule_work(&bdev->tx_work);

	return 0;
}

static int btmtksdio_probe(struct sdio_func *func,
			   const struct sdio_device_id *id)
{
	struct btmtksdio_dev *bdev;
	struct hci_dev *hdev;
	int err;

	bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	bdev->data = (void *)id->driver_data;
	if (!bdev->data)
		return -ENODEV;

	bdev->dev = &func->dev;
	bdev->func = func;

	INIT_WORK(&bdev->tx_work, btmtksdio_tx_work);
	skb_queue_head_init(&bdev->txq);

	/* Initialize and register HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		dev_err(&func->dev, "Can't allocate HCI device\n");
		return -ENOMEM;
	}

	bdev->hdev = hdev;

	hdev->bus = HCI_SDIO;
	hci_set_drvdata(hdev, bdev);

	hdev->open     = btmtksdio_open;
	hdev->close    = btmtksdio_close;
	hdev->flush    = btmtksdio_flush;
	hdev->setup    = btmtksdio_setup;
	hdev->shutdown = btmtksdio_shutdown;
	hdev->send     = btmtksdio_send_frame;
	SET_HCIDEV_DEV(hdev, &func->dev);

	hdev->manufacturer = 70;
	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);

	err = hci_register_dev(hdev);
	if (err < 0) {
		dev_err(&func->dev, "Can't register HCI device\n");
		hci_free_dev(hdev);
		return err;
	}

	sdio_set_drvdata(func, bdev);

	/* pm_runtime_enable will be done after the firmware is downloaded
	 * because the core layer may already have enabled runtime PM for this
	 * func, such as when host->caps & MMC_CAP_POWER_OFF_CARD.
	 */
	if (pm_runtime_enabled(bdev->dev))
		pm_runtime_disable(bdev->dev);

	/* As the explanation in drivers/mmc/core/sdio_bus.c tells us:
	 * Unbound SDIO functions are always suspended.
	 * During probe, the function is set active and the usage count
	 * is incremented.  If the driver supports runtime PM,
	 * it should call pm_runtime_put_noidle() in its probe routine and
	 * pm_runtime_get_noresume() in its remove routine.
	 *
	 * So, put a pm_runtime_put_noidle here!
	 */
	pm_runtime_put_noidle(bdev->dev);

	return 0;
}

static void btmtksdio_remove(struct sdio_func *func)
{
	struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
	struct hci_dev *hdev;

	if (!bdev)
		return;

	/* Be consistent with the state in btmtksdio_probe */
	pm_runtime_get_noresume(bdev->dev);

	hdev = bdev->hdev;

	sdio_set_drvdata(func, NULL);
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
}

#ifdef CONFIG_PM
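/* Runtime PM hands ownership back and forth with the firmware: suspend sets
 * FW_OWN and waits for the driver-own bit to clear, resume clears FW_OWN and
 * waits for the driver-own bit to be set again.
 */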
static int btmtksdio_runtime_suspend(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);
	struct btmtksdio_dev *bdev;
	u32 status;
	int err;

	bdev = sdio_get_drvdata(func);
	if (!bdev)
		return 0;

	sdio_claim_host(bdev->func);

	sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto out;

	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 !(status & C_COM_DRV_OWN), 2000, 1000000);
out:
	bt_dev_info(bdev->hdev, "status (%d) return ownership to device", err);

	sdio_release_host(bdev->func);

	return err;
}

static int btmtksdio_runtime_resume(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);
	struct btmtksdio_dev *bdev;
	u32 status;
	int err;

	bdev = sdio_get_drvdata(func);
	if (!bdev)
		return 0;

	sdio_claim_host(bdev->func);

	sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto out;

	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 status & C_COM_DRV_OWN, 2000, 1000000);
out:
	bt_dev_info(bdev->hdev, "status (%d) get ownership from device", err);

	sdio_release_host(bdev->func);

	return err;
}

static UNIVERSAL_DEV_PM_OPS(btmtksdio_pm_ops, btmtksdio_runtime_suspend,
			    btmtksdio_runtime_resume, NULL);
#define BTMTKSDIO_PM_OPS (&btmtksdio_pm_ops)
#else	/* CONFIG_PM */
#define BTMTKSDIO_PM_OPS NULL
#endif	/* CONFIG_PM */

static struct sdio_driver btmtksdio_driver = {
	.name		= "btmtksdio",
	.probe		= btmtksdio_probe,
	.remove		= btmtksdio_remove,
	.id_table	= btmtksdio_table,
	.drv = {
		.owner = THIS_MODULE,
		.pm = BTMTKSDIO_PM_OPS,
	}
};

module_sdio_driver(btmtksdio_driver);

module_param(enable_autosuspend, bool, 0644);
MODULE_PARM_DESC(enable_autosuspend, "Enable autosuspend by default");

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek Bluetooth SDIO driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_MT7663);
MODULE_FIRMWARE(FIRMWARE_MT7668);