/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
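
/* Illustrative only (editorial example, not part of the original file):
 * a caller that looks up a device by index must balance the reference
 * taken by hci_dev_hold() above with hci_dev_put(), e.g.:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */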

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
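
/* Illustrative sketch (editorial example, not part of the original file):
 * from userspace the inquiry above is typically driven through the
 * HCIINQUIRY ioctl on a raw HCI socket, with a struct hci_inquiry_req
 * followed by room for the responses, roughly:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .flags = IREQ_CACHE_FLUSH,
 *			  .lap = { 0x33, 0x8b, 0x9e },
 *			  .length = 8, .num_rsp = 0 } };
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 *
 * Field names follow the UAPI definitions; the socket setup (hci_sock_fd)
 * and exact invocation are assumptions about the caller.
 */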

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
	/* Wait for
	 *
	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 *
	 * inside RCU section to see the flag or complete scheduling.
	 */
	synchronize_rcu();
	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
	hdev->iso_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
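
/* Illustrative note (editorial, not part of the original file): for
 * HCISETACLMTU and HCISETSCOMTU the 32-bit dev_opt carries two 16-bit
 * values, the packet count in the low half and the MTU in the high half,
 * so a caller would set up dev_opt roughly as:
 *
 *	dr.dev_opt = (mtu << 16) | pkts;	// hypothetical caller side
 *
 * which matches the *((__u16 *)&dr.dev_opt + 0 / + 1) accesses above on a
 * little-endian host; the layout is endian-dependent by construction.
 */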

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key, *tmp;

	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b, *tmp;

	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as a requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as a requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as a requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
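
/* Worked example (editorial, not part of the original file): an
 * unauthenticated combination key (0x04) negotiated while both sides asked
 * for dedicated bonding (auth requirement 0x02 or 0x03) is reported as
 * persistent by the logic above, whereas a debug combination key (0x03) is
 * never stored persistently, regardless of the bonding requirements.
 */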

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:

	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
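
/* Summary of the data->present encoding produced above (descriptive comment
 * added editorially, not part of the original file): 0x00 = no OOB data,
 * 0x01 = P-192 values only, 0x02 = P-256 values only, 0x03 = both P-192 and
 * P-256 values present.
 */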

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}
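
/* Illustrative only (editorial example, not part of the original file):
 * callers of the advertising instance helpers below are expected to hold
 * hdev->lock around the lookup and any use of the result, e.g.:
 *
 *	hci_dev_lock(hdev);
 *	adv = hci_find_adv_instance(hdev, instance);
 *	if (adv)
 *		... inspect or update adv ...
 *	hci_dev_unlock(hdev);
 */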

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
						 struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u16 adv_data_len, u8 *adv_data,
				      u16 scan_rsp_len, u8 *scan_rsp_data,
				      u16 timeout, u16 duration, s8 tx_power,
				      u32 min_interval, u32 max_interval,
				      u8 mesh_handle)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);
	if (adv) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
			return ERR_PTR(-EOVERFLOW);

		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
		if (!adv)
			return ERR_PTR(-ENOMEM);

		adv->pending = true;
		adv->instance = instance;
		list_add(&adv->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv->flags = flags;
	adv->min_interval = min_interval;
	adv->max_interval = max_interval;
	adv->tx_power = tx_power;
	/* Defining a mesh_handle changes the timing units to ms,
	 * rather than seconds, and ties the instance to the requested
	 * mesh_tx queue.
	 */
	adv->mesh = mesh_handle;

	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
				  scan_rsp_len, scan_rsp_data);

	adv->timeout = timeout;
	adv->remaining_time = timeout;

	if (duration == 0)
		adv->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv->duration = duration;

	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);

	BT_DBG("%s for %d", hdev->name, instance);

	return adv;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)
{
	struct adv_info *adv;

	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   min_interval, max_interval, 0);
	if (IS_ERR(adv))
		return adv;

	adv->periodic = true;
	adv->per_adv_data_len = data_len;

	if (data)
		memcpy(adv->per_adv_data, data, data_len);

	return adv;
}

/* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */
	if (!adv)
		return -ENOENT;

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
	}

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
	}

	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

	return 0;
}

/* This function requires the caller holds hdev->lock */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv)
		return 0;

	return adv->flags;
}

bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return true;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return false;

	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv->scan_rsp_len ? true : false;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int handle;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	idr_destroy(&hdev->adv_monitors_idr);
}

/* Frees the monitor structure and does some bookkeeping.
 * This function requires the caller holds hdev->lock.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	if (!monitor)
		return;

	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
	}

	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
	}

	kfree(monitor);
}
1920 
1921 /* Assigns a handle to the monitor and, if offloading is supported and power is on,
1922  * also attempts to forward the request to the controller.
1923  * This function requires the caller holds hci_req_sync_lock.
1924  */
hci_add_adv_monitor(struct hci_dev * hdev,struct adv_monitor * monitor)1925 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1926 {
1927 	int min, max, handle;
1928 	int status = 0;
1929 
1930 	if (!monitor)
1931 		return -EINVAL;
1932 
1933 	hci_dev_lock(hdev);
1934 
1935 	min = HCI_MIN_ADV_MONITOR_HANDLE;
1936 	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1937 	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1938 			   GFP_KERNEL);
1939 
1940 	hci_dev_unlock(hdev);
1941 
1942 	if (handle < 0)
1943 		return handle;
1944 
1945 	monitor->handle = handle;
1946 
1947 	if (!hdev_is_powered(hdev))
1948 		return status;
1949 
1950 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1951 	case HCI_ADV_MONITOR_EXT_NONE:
1952 		bt_dev_dbg(hdev, "add monitor %d status %d",
1953 			   monitor->handle, status);
1954 		/* Message was not forwarded to controller - not an error */
1955 		break;
1956 
1957 	case HCI_ADV_MONITOR_EXT_MSFT:
1958 		status = msft_add_monitor_pattern(hdev, monitor);
1959 		bt_dev_dbg(hdev, "add monitor %d msft status %d",
1960 			   handle, status);
1961 		break;
1962 	}
1963 
1964 	return status;
1965 }
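
/* Illustrative sketch (not compiled): how a caller might register an
 * advertisement monitor via the helper above. Only the locking and the call
 * into hci_add_adv_monitor() mirror what this file requires; the monitor
 * contents shown here are a minimal, hypothetical setup.
 *
 *	struct adv_monitor *monitor;
 *	int err;
 *
 *	monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
 *	if (!monitor)
 *		return -ENOMEM;
 *
 *	INIT_LIST_HEAD(&monitor->patterns);
 *	monitor->state = ADV_MONITOR_STATE_NOT_REGISTERED;
 *
 *	hci_req_sync_lock(hdev);
 *	err = hci_add_adv_monitor(hdev, monitor);
 *	hci_req_sync_unlock(hdev);
 */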
1966 
1967 /* Attempts to remove the monitor from the controller and then frees it. If
1968  * the controller has no corresponding handle, the monitor is removed anyway.
1969  * This function requires the caller holds hci_req_sync_lock.
1970  */
hci_remove_adv_monitor(struct hci_dev * hdev,struct adv_monitor * monitor)1971 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1972 				  struct adv_monitor *monitor)
1973 {
1974 	int status = 0;
1975 	int handle;
1976 
1977 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1978 	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1979 		bt_dev_dbg(hdev, "remove monitor %d status %d",
1980 			   monitor->handle, status);
1981 		goto free_monitor;
1982 
1983 	case HCI_ADV_MONITOR_EXT_MSFT:
1984 		handle = monitor->handle;
1985 		status = msft_remove_monitor(hdev, monitor);
1986 		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1987 			   handle, status);
1988 		break;
1989 	}
1990 
1991 	/* In case no matching handle registered, just free the monitor */
1992 	if (status == -ENOENT)
1993 		goto free_monitor;
1994 
1995 	return status;
1996 
1997 free_monitor:
1998 	if (status == -ENOENT)
1999 		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
2000 			    monitor->handle);
2001 	hci_free_adv_monitor(hdev, monitor);
2002 
2003 	return status;
2004 }
2005 
2006 /* This function requires the caller holds hci_req_sync_lock */
hci_remove_single_adv_monitor(struct hci_dev * hdev,u16 handle)2007 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2008 {
2009 	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2010 
2011 	if (!monitor)
2012 		return -EINVAL;
2013 
2014 	return hci_remove_adv_monitor(hdev, monitor);
2015 }
2016 
2017 /* This function requires the caller holds hci_req_sync_lock */
hci_remove_all_adv_monitor(struct hci_dev * hdev)2018 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2019 {
2020 	struct adv_monitor *monitor;
2021 	int idr_next_id = 0;
2022 	int status = 0;
2023 
2024 	while (1) {
2025 		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2026 		if (!monitor)
2027 			break;
2028 
2029 		status = hci_remove_adv_monitor(hdev, monitor);
2030 		if (status)
2031 			return status;
2032 
2033 		idr_next_id++;
2034 	}
2035 
2036 	return status;
2037 }
2038 
2039 /* This function requires the caller holds hdev->lock */
hci_is_adv_monitoring(struct hci_dev * hdev)2040 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2041 {
2042 	return !idr_is_empty(&hdev->adv_monitors_idr);
2043 }
2044 
hci_get_adv_monitor_offload_ext(struct hci_dev * hdev)2045 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2046 {
2047 	if (msft_monitor_supported(hdev))
2048 		return HCI_ADV_MONITOR_EXT_MSFT;
2049 
2050 	return HCI_ADV_MONITOR_EXT_NONE;
2051 }
2052 
hci_bdaddr_list_lookup(struct list_head * bdaddr_list,bdaddr_t * bdaddr,u8 type)2053 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2054 					 bdaddr_t *bdaddr, u8 type)
2055 {
2056 	struct bdaddr_list *b;
2057 
2058 	list_for_each_entry(b, bdaddr_list, list) {
2059 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2060 			return b;
2061 	}
2062 
2063 	return NULL;
2064 }
2065 
hci_bdaddr_list_lookup_with_irk(struct list_head * bdaddr_list,bdaddr_t * bdaddr,u8 type)2066 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2067 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2068 				u8 type)
2069 {
2070 	struct bdaddr_list_with_irk *b;
2071 
2072 	list_for_each_entry(b, bdaddr_list, list) {
2073 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2074 			return b;
2075 	}
2076 
2077 	return NULL;
2078 }
2079 
2080 struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head * bdaddr_list,bdaddr_t * bdaddr,u8 type)2081 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2082 				  bdaddr_t *bdaddr, u8 type)
2083 {
2084 	struct bdaddr_list_with_flags *b;
2085 
2086 	list_for_each_entry(b, bdaddr_list, list) {
2087 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2088 			return b;
2089 	}
2090 
2091 	return NULL;
2092 }
2093 
hci_bdaddr_list_clear(struct list_head * bdaddr_list)2094 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2095 {
2096 	struct bdaddr_list *b, *n;
2097 
2098 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2099 		list_del(&b->list);
2100 		kfree(b);
2101 	}
2102 }
2103 
hci_bdaddr_list_add(struct list_head * list,bdaddr_t * bdaddr,u8 type)2104 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2105 {
2106 	struct bdaddr_list *entry;
2107 
2108 	if (!bacmp(bdaddr, BDADDR_ANY))
2109 		return -EBADF;
2110 
2111 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2112 		return -EEXIST;
2113 
2114 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2115 	if (!entry)
2116 		return -ENOMEM;
2117 
2118 	bacpy(&entry->bdaddr, bdaddr);
2119 	entry->bdaddr_type = type;
2120 
2121 	list_add(&entry->list, list);
2122 
2123 	return 0;
2124 }
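
/* Illustrative sketch (not compiled): typical use of the bdaddr list helpers,
 * e.g. maintaining hdev->le_accept_list. The address value is hypothetical;
 * callers normally hold hdev->lock around these calls.
 *
 *	bdaddr_t peer = { .b = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 } };
 *	int err;
 *
 *	err = hci_bdaddr_list_add(&hdev->le_accept_list, &peer,
 *				  ADDR_LE_DEV_PUBLIC);
 *	if (err && err != -EEXIST)
 *		return err;
 *
 *	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &peer,
 *				   ADDR_LE_DEV_PUBLIC))
 *		hci_bdaddr_list_del(&hdev->le_accept_list, &peer,
 *				    ADDR_LE_DEV_PUBLIC);
 */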
2125 
hci_bdaddr_list_add_with_irk(struct list_head * list,bdaddr_t * bdaddr,u8 type,u8 * peer_irk,u8 * local_irk)2126 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2127 					u8 type, u8 *peer_irk, u8 *local_irk)
2128 {
2129 	struct bdaddr_list_with_irk *entry;
2130 
2131 	if (!bacmp(bdaddr, BDADDR_ANY))
2132 		return -EBADF;
2133 
2134 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2135 		return -EEXIST;
2136 
2137 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2138 	if (!entry)
2139 		return -ENOMEM;
2140 
2141 	bacpy(&entry->bdaddr, bdaddr);
2142 	entry->bdaddr_type = type;
2143 
2144 	if (peer_irk)
2145 		memcpy(entry->peer_irk, peer_irk, 16);
2146 
2147 	if (local_irk)
2148 		memcpy(entry->local_irk, local_irk, 16);
2149 
2150 	list_add(&entry->list, list);
2151 
2152 	return 0;
2153 }
2154 
hci_bdaddr_list_add_with_flags(struct list_head * list,bdaddr_t * bdaddr,u8 type,u32 flags)2155 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2156 				   u8 type, u32 flags)
2157 {
2158 	struct bdaddr_list_with_flags *entry;
2159 
2160 	if (!bacmp(bdaddr, BDADDR_ANY))
2161 		return -EBADF;
2162 
2163 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2164 		return -EEXIST;
2165 
2166 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2167 	if (!entry)
2168 		return -ENOMEM;
2169 
2170 	bacpy(&entry->bdaddr, bdaddr);
2171 	entry->bdaddr_type = type;
2172 	entry->flags = flags;
2173 
2174 	list_add(&entry->list, list);
2175 
2176 	return 0;
2177 }
2178 
hci_bdaddr_list_del(struct list_head * list,bdaddr_t * bdaddr,u8 type)2179 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2180 {
2181 	struct bdaddr_list *entry;
2182 
2183 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2184 		hci_bdaddr_list_clear(list);
2185 		return 0;
2186 	}
2187 
2188 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2189 	if (!entry)
2190 		return -ENOENT;
2191 
2192 	list_del(&entry->list);
2193 	kfree(entry);
2194 
2195 	return 0;
2196 }
2197 
hci_bdaddr_list_del_with_irk(struct list_head * list,bdaddr_t * bdaddr,u8 type)2198 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2199 							u8 type)
2200 {
2201 	struct bdaddr_list_with_irk *entry;
2202 
2203 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2204 		hci_bdaddr_list_clear(list);
2205 		return 0;
2206 	}
2207 
2208 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2209 	if (!entry)
2210 		return -ENOENT;
2211 
2212 	list_del(&entry->list);
2213 	kfree(entry);
2214 
2215 	return 0;
2216 }
2217 
hci_bdaddr_list_del_with_flags(struct list_head * list,bdaddr_t * bdaddr,u8 type)2218 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2219 				   u8 type)
2220 {
2221 	struct bdaddr_list_with_flags *entry;
2222 
2223 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2224 		hci_bdaddr_list_clear(list);
2225 		return 0;
2226 	}
2227 
2228 	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2229 	if (!entry)
2230 		return -ENOENT;
2231 
2232 	list_del(&entry->list);
2233 	kfree(entry);
2234 
2235 	return 0;
2236 }
2237 
2238 /* This function requires the caller holds hdev->lock */
hci_conn_params_lookup(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2239 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2240 					       bdaddr_t *addr, u8 addr_type)
2241 {
2242 	struct hci_conn_params *params;
2243 
2244 	list_for_each_entry(params, &hdev->le_conn_params, list) {
2245 		if (bacmp(&params->addr, addr) == 0 &&
2246 		    params->addr_type == addr_type) {
2247 			return params;
2248 		}
2249 	}
2250 
2251 	return NULL;
2252 }
2253 
2254 /* This function requires the caller holds hdev->lock or rcu_read_lock */
hci_pend_le_action_lookup(struct list_head * list,bdaddr_t * addr,u8 addr_type)2255 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2256 						  bdaddr_t *addr, u8 addr_type)
2257 {
2258 	struct hci_conn_params *param;
2259 
2260 	rcu_read_lock();
2261 
2262 	list_for_each_entry_rcu(param, list, action) {
2263 		if (bacmp(&param->addr, addr) == 0 &&
2264 		    param->addr_type == addr_type) {
2265 			rcu_read_unlock();
2266 			return param;
2267 		}
2268 	}
2269 
2270 	rcu_read_unlock();
2271 
2272 	return NULL;
2273 }
2274 
2275 /* This function requires the caller holds hdev->lock */
hci_pend_le_list_del_init(struct hci_conn_params * param)2276 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2277 {
2278 	if (list_empty(&param->action))
2279 		return;
2280 
2281 	list_del_rcu(&param->action);
2282 	synchronize_rcu();
2283 	INIT_LIST_HEAD(&param->action);
2284 }
2285 
2286 /* This function requires the caller holds hdev->lock */
hci_pend_le_list_add(struct hci_conn_params * param,struct list_head * list)2287 void hci_pend_le_list_add(struct hci_conn_params *param,
2288 			  struct list_head *list)
2289 {
2290 	list_add_rcu(&param->action, list);
2291 }
2292 
2293 /* This function requires the caller holds hdev->lock */
hci_conn_params_add(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2294 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2295 					    bdaddr_t *addr, u8 addr_type)
2296 {
2297 	struct hci_conn_params *params;
2298 
2299 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2300 	if (params)
2301 		return params;
2302 
2303 	params = kzalloc(sizeof(*params), GFP_KERNEL);
2304 	if (!params) {
2305 		bt_dev_err(hdev, "out of memory");
2306 		return NULL;
2307 	}
2308 
2309 	bacpy(&params->addr, addr);
2310 	params->addr_type = addr_type;
2311 
2312 	list_add(&params->list, &hdev->le_conn_params);
2313 	INIT_LIST_HEAD(&params->action);
2314 
2315 	params->conn_min_interval = hdev->le_conn_min_interval;
2316 	params->conn_max_interval = hdev->le_conn_max_interval;
2317 	params->conn_latency = hdev->le_conn_latency;
2318 	params->supervision_timeout = hdev->le_supv_timeout;
2319 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
2320 
2321 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2322 
2323 	return params;
2324 }
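
/* Illustrative sketch (not compiled): looking up or creating connection
 * parameters for an LE peer and enabling auto-connect. The peer address and
 * the chosen policy are hypothetical; hdev->lock must be held as noted above.
 *
 *	struct hci_conn_params *params;
 *
 *	hci_dev_lock(hdev);
 *	params = hci_conn_params_add(hdev, &peer, ADDR_LE_DEV_PUBLIC);
 *	if (params)
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *	hci_dev_unlock(hdev);
 */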
2325 
hci_conn_params_free(struct hci_conn_params * params)2326 void hci_conn_params_free(struct hci_conn_params *params)
2327 {
2328 	hci_pend_le_list_del_init(params);
2329 
2330 	if (params->conn) {
2331 		hci_conn_drop(params->conn);
2332 		hci_conn_put(params->conn);
2333 	}
2334 
2335 	list_del(&params->list);
2336 	kfree(params);
2337 }
2338 
2339 /* This function requires the caller holds hdev->lock */
hci_conn_params_del(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2340 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2341 {
2342 	struct hci_conn_params *params;
2343 
2344 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2345 	if (!params)
2346 		return;
2347 
2348 	hci_conn_params_free(params);
2349 
2350 	hci_update_passive_scan(hdev);
2351 
2352 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2353 }
2354 
2355 /* This function requires the caller holds hdev->lock */
hci_conn_params_clear_disabled(struct hci_dev * hdev)2356 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2357 {
2358 	struct hci_conn_params *params, *tmp;
2359 
2360 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2361 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2362 			continue;
2363 
2364 		/* If trying to establish a one-time connection to a disabled
2365 		 * device, keep the params but mark them for explicit use only.
2366 		 */
2367 		if (params->explicit_connect) {
2368 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2369 			continue;
2370 		}
2371 
2372 		hci_conn_params_free(params);
2373 	}
2374 
2375 	BT_DBG("All LE disabled connection parameters were removed");
2376 }
2377 
2378 /* This function requires the caller holds hdev->lock */
hci_conn_params_clear_all(struct hci_dev * hdev)2379 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2380 {
2381 	struct hci_conn_params *params, *tmp;
2382 
2383 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2384 		hci_conn_params_free(params);
2385 
2386 	BT_DBG("All LE connection parameters were removed");
2387 }
2388 
2389 /* Copy the Identity Address of the controller.
2390  *
2391  * If the controller has a public BD_ADDR, then by default use that one.
2392  * If this is an LE-only controller without a public address, default to
2393  * the static random address.
2394  *
2395  * For debugging purposes it is possible to force controllers with a
2396  * public address to use the static random address instead.
2397  *
2398  * In case BR/EDR has been disabled on a dual-mode controller and
2399  * userspace has configured a static address, then that address
2400  * becomes the identity address instead of the public BR/EDR address.
2401  */
hci_copy_identity_address(struct hci_dev * hdev,bdaddr_t * bdaddr,u8 * bdaddr_type)2402 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2403 			       u8 *bdaddr_type)
2404 {
2405 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2406 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2407 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2408 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2409 		bacpy(bdaddr, &hdev->static_addr);
2410 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
2411 	} else {
2412 		bacpy(bdaddr, &hdev->bdaddr);
2413 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
2414 	}
2415 }
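
/* Worked example of the selection logic above (illustrative): on an LE-only
 * controller where hdev->bdaddr is BDADDR_ANY and a static address has been
 * configured, the static address is returned with type ADDR_LE_DEV_RANDOM;
 * on a dual-mode controller with BR/EDR enabled and a valid public address,
 * hdev->bdaddr is returned with type ADDR_LE_DEV_PUBLIC.
 *
 *	bdaddr_t id;
 *	u8 id_type;
 *
 *	hci_copy_identity_address(hdev, &id, &id_type);
 */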
2416 
hci_clear_wake_reason(struct hci_dev * hdev)2417 static void hci_clear_wake_reason(struct hci_dev *hdev)
2418 {
2419 	hci_dev_lock(hdev);
2420 
2421 	hdev->wake_reason = 0;
2422 	bacpy(&hdev->wake_addr, BDADDR_ANY);
2423 	hdev->wake_addr_type = 0;
2424 
2425 	hci_dev_unlock(hdev);
2426 }
2427 
hci_suspend_notifier(struct notifier_block * nb,unsigned long action,void * data)2428 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2429 				void *data)
2430 {
2431 	struct hci_dev *hdev =
2432 		container_of(nb, struct hci_dev, suspend_notifier);
2433 	int ret = 0;
2434 
2435 	/* Userspace has full control of this device. Do nothing. */
2436 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2437 		return NOTIFY_DONE;
2438 
2439 	/* To avoid a potential race with hci_unregister_dev. */
2440 	hci_dev_hold(hdev);
2441 
2442 	if (action == PM_SUSPEND_PREPARE)
2443 		ret = hci_suspend_dev(hdev);
2444 	else if (action == PM_POST_SUSPEND)
2445 		ret = hci_resume_dev(hdev);
2446 
2447 	if (ret)
2448 		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2449 			   action, ret);
2450 
2451 	hci_dev_put(hdev);
2452 	return NOTIFY_DONE;
2453 }
2454 
2455 /* Alloc HCI device */
hci_alloc_dev_priv(int sizeof_priv)2456 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2457 {
2458 	struct hci_dev *hdev;
2459 	unsigned int alloc_size;
2460 
2461 	alloc_size = sizeof(*hdev);
2462 	if (sizeof_priv) {
2463 		/* Fixme: May need ALIGN-ment? */
2464 		alloc_size += sizeof_priv;
2465 	}
2466 
2467 	hdev = kzalloc(alloc_size, GFP_KERNEL);
2468 	if (!hdev)
2469 		return NULL;
2470 
2471 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2472 	hdev->esco_type = (ESCO_HV1);
2473 	hdev->link_mode = (HCI_LM_ACCEPT);
2474 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
2475 	hdev->io_capability = 0x03;	/* No Input No Output */
2476 	hdev->manufacturer = 0xffff;	/* Default to internal use */
2477 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2478 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2479 	hdev->adv_instance_cnt = 0;
2480 	hdev->cur_adv_instance = 0x00;
2481 	hdev->adv_instance_timeout = 0;
2482 
2483 	hdev->advmon_allowlist_duration = 300;
2484 	hdev->advmon_no_filter_duration = 500;
2485 	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */
2486 
2487 	hdev->sniff_max_interval = 800;
2488 	hdev->sniff_min_interval = 80;
2489 
2490 	hdev->le_adv_channel_map = 0x07;
2491 	hdev->le_adv_min_interval = 0x0800;
2492 	hdev->le_adv_max_interval = 0x0800;
2493 	hdev->le_scan_interval = 0x0060;
2494 	hdev->le_scan_window = 0x0030;
2495 	hdev->le_scan_int_suspend = 0x0400;
2496 	hdev->le_scan_window_suspend = 0x0012;
2497 	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2498 	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2499 	hdev->le_scan_int_adv_monitor = 0x0060;
2500 	hdev->le_scan_window_adv_monitor = 0x0030;
2501 	hdev->le_scan_int_connect = 0x0060;
2502 	hdev->le_scan_window_connect = 0x0060;
2503 	hdev->le_conn_min_interval = 0x0018;
2504 	hdev->le_conn_max_interval = 0x0028;
2505 	hdev->le_conn_latency = 0x0000;
2506 	hdev->le_supv_timeout = 0x002a;
2507 	hdev->le_def_tx_len = 0x001b;
2508 	hdev->le_def_tx_time = 0x0148;
2509 	hdev->le_max_tx_len = 0x001b;
2510 	hdev->le_max_tx_time = 0x0148;
2511 	hdev->le_max_rx_len = 0x001b;
2512 	hdev->le_max_rx_time = 0x0148;
2513 	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2514 	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2515 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2516 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2517 	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2518 	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2519 	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2520 	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2521 	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2522 
2523 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2524 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2525 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2526 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2527 	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2528 	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2529 
2530 	/* default 1.28 sec page scan */
2531 	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2532 	hdev->def_page_scan_int = 0x0800;
2533 	hdev->def_page_scan_window = 0x0012;
2534 
2535 	mutex_init(&hdev->lock);
2536 	mutex_init(&hdev->req_lock);
2537 
2538 	INIT_LIST_HEAD(&hdev->mesh_pending);
2539 	INIT_LIST_HEAD(&hdev->mgmt_pending);
2540 	INIT_LIST_HEAD(&hdev->reject_list);
2541 	INIT_LIST_HEAD(&hdev->accept_list);
2542 	INIT_LIST_HEAD(&hdev->uuids);
2543 	INIT_LIST_HEAD(&hdev->link_keys);
2544 	INIT_LIST_HEAD(&hdev->long_term_keys);
2545 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2546 	INIT_LIST_HEAD(&hdev->remote_oob_data);
2547 	INIT_LIST_HEAD(&hdev->le_accept_list);
2548 	INIT_LIST_HEAD(&hdev->le_resolv_list);
2549 	INIT_LIST_HEAD(&hdev->le_conn_params);
2550 	INIT_LIST_HEAD(&hdev->pend_le_conns);
2551 	INIT_LIST_HEAD(&hdev->pend_le_reports);
2552 	INIT_LIST_HEAD(&hdev->conn_hash.list);
2553 	INIT_LIST_HEAD(&hdev->adv_instances);
2554 	INIT_LIST_HEAD(&hdev->blocked_keys);
2555 	INIT_LIST_HEAD(&hdev->monitored_devices);
2556 
2557 	INIT_LIST_HEAD(&hdev->local_codecs);
2558 	INIT_WORK(&hdev->rx_work, hci_rx_work);
2559 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2560 	INIT_WORK(&hdev->tx_work, hci_tx_work);
2561 	INIT_WORK(&hdev->power_on, hci_power_on);
2562 	INIT_WORK(&hdev->error_reset, hci_error_reset);
2563 
2564 	hci_cmd_sync_init(hdev);
2565 
2566 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2567 
2568 	skb_queue_head_init(&hdev->rx_q);
2569 	skb_queue_head_init(&hdev->cmd_q);
2570 	skb_queue_head_init(&hdev->raw_q);
2571 
2572 	init_waitqueue_head(&hdev->req_wait_q);
2573 
2574 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2575 	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2576 
2577 	hci_devcd_setup(hdev);
2578 	hci_request_setup(hdev);
2579 
2580 	hci_init_sysfs(hdev);
2581 	discovery_init(hdev);
2582 
2583 	return hdev;
2584 }
2585 EXPORT_SYMBOL(hci_alloc_dev_priv);
2586 
2587 /* Free HCI device */
hci_free_dev(struct hci_dev * hdev)2588 void hci_free_dev(struct hci_dev *hdev)
2589 {
2590 	/* will be freed via the device release callback */
2591 	put_device(&hdev->dev);
2592 }
2593 EXPORT_SYMBOL(hci_free_dev);
2594 
2595 /* Register HCI device */
hci_register_dev(struct hci_dev * hdev)2596 int hci_register_dev(struct hci_dev *hdev)
2597 {
2598 	int id, error;
2599 
2600 	if (!hdev->open || !hdev->close || !hdev->send)
2601 		return -EINVAL;
2602 
2603 	/* Do not allow HCI_AMP devices to register at index 0,
2604 	 * so the index can be used as the AMP controller ID.
2605 	 */
2606 	switch (hdev->dev_type) {
2607 	case HCI_PRIMARY:
2608 		id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2609 		break;
2610 	case HCI_AMP:
2611 		id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2612 		break;
2613 	default:
2614 		return -EINVAL;
2615 	}
2616 
2617 	if (id < 0)
2618 		return id;
2619 
2620 	error = dev_set_name(&hdev->dev, "hci%u", id);
2621 	if (error)
2622 		return error;
2623 
2624 	hdev->name = dev_name(&hdev->dev);
2625 	hdev->id = id;
2626 
2627 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2628 
2629 	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2630 	if (!hdev->workqueue) {
2631 		error = -ENOMEM;
2632 		goto err;
2633 	}
2634 
2635 	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2636 						      hdev->name);
2637 	if (!hdev->req_workqueue) {
2638 		destroy_workqueue(hdev->workqueue);
2639 		error = -ENOMEM;
2640 		goto err;
2641 	}
2642 
2643 	if (!IS_ERR_OR_NULL(bt_debugfs))
2644 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2645 
2646 	error = device_add(&hdev->dev);
2647 	if (error < 0)
2648 		goto err_wqueue;
2649 
2650 	hci_leds_init(hdev);
2651 
2652 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2653 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2654 				    hdev);
2655 	if (hdev->rfkill) {
2656 		if (rfkill_register(hdev->rfkill) < 0) {
2657 			rfkill_destroy(hdev->rfkill);
2658 			hdev->rfkill = NULL;
2659 		}
2660 	}
2661 
2662 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2663 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2664 
2665 	hci_dev_set_flag(hdev, HCI_SETUP);
2666 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2667 
2668 	if (hdev->dev_type == HCI_PRIMARY) {
2669 		/* Assume BR/EDR support until proven otherwise (such as
2670 		 * through reading supported features during init).
2671 		 */
2672 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2673 	}
2674 
2675 	write_lock(&hci_dev_list_lock);
2676 	list_add(&hdev->list, &hci_dev_list);
2677 	write_unlock(&hci_dev_list_lock);
2678 
2679 	/* Devices that are marked for raw-only usage are unconfigured
2680 	 * and should not be included in normal operation.
2681 	 */
2682 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2683 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2684 
2685 	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
2686 	 * callback.
2687 	 */
2688 	if (hdev->wakeup)
2689 		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2690 
2691 	hci_sock_dev_event(hdev, HCI_DEV_REG);
2692 	hci_dev_hold(hdev);
2693 
2694 	error = hci_register_suspend_notifier(hdev);
2695 	if (error)
2696 		BT_WARN("register suspend notifier failed error:%d\n", error);
2697 
2698 	queue_work(hdev->req_workqueue, &hdev->power_on);
2699 
2700 	idr_init(&hdev->adv_monitors_idr);
2701 	msft_register(hdev);
2702 
2703 	return id;
2704 
2705 err_wqueue:
2706 	debugfs_remove_recursive(hdev->debugfs);
2707 	destroy_workqueue(hdev->workqueue);
2708 	destroy_workqueue(hdev->req_workqueue);
2709 err:
2710 	ida_simple_remove(&hci_index_ida, hdev->id);
2711 
2712 	return error;
2713 }
2714 EXPORT_SYMBOL(hci_register_dev);
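
/* Illustrative sketch (not compiled): the minimal driver-side sequence for
 * bringing up an HCI transport with the registration API above. The callback
 * names are hypothetical; open, close and send are the only mandatory hooks
 * checked by hci_register_dev(), with send also taking the outgoing sk_buff.
 *
 *	struct hci_dev *hdev;
 *	int id;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = example_open;
 *	hdev->close = example_close;
 *	hdev->send  = example_send;
 *
 *	id = hci_register_dev(hdev);
 *	if (id < 0) {
 *		hci_free_dev(hdev);
 *		return id;
 *	}
 */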
2715 
2716 /* Unregister HCI device */
hci_unregister_dev(struct hci_dev * hdev)2717 void hci_unregister_dev(struct hci_dev *hdev)
2718 {
2719 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2720 
2721 	mutex_lock(&hdev->unregister_lock);
2722 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
2723 	mutex_unlock(&hdev->unregister_lock);
2724 
2725 	write_lock(&hci_dev_list_lock);
2726 	list_del(&hdev->list);
2727 	write_unlock(&hci_dev_list_lock);
2728 
2729 	cancel_work_sync(&hdev->power_on);
2730 
2731 	hci_cmd_sync_clear(hdev);
2732 
2733 	hci_unregister_suspend_notifier(hdev);
2734 
2735 	msft_unregister(hdev);
2736 
2737 	hci_dev_do_close(hdev);
2738 
2739 	if (!test_bit(HCI_INIT, &hdev->flags) &&
2740 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
2741 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2742 		hci_dev_lock(hdev);
2743 		mgmt_index_removed(hdev);
2744 		hci_dev_unlock(hdev);
2745 	}
2746 
2747 	/* mgmt_index_removed should take care of emptying the
2748 	 * pending list */
2749 	BUG_ON(!list_empty(&hdev->mgmt_pending));
2750 
2751 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2752 
2753 	if (hdev->rfkill) {
2754 		rfkill_unregister(hdev->rfkill);
2755 		rfkill_destroy(hdev->rfkill);
2756 	}
2757 
2758 	device_del(&hdev->dev);
2759 	/* Actual cleanup is deferred until hci_release_dev(). */
2760 	hci_dev_put(hdev);
2761 }
2762 EXPORT_SYMBOL(hci_unregister_dev);
2763 
2764 /* Release HCI device */
hci_release_dev(struct hci_dev * hdev)2765 void hci_release_dev(struct hci_dev *hdev)
2766 {
2767 	debugfs_remove_recursive(hdev->debugfs);
2768 	kfree_const(hdev->hw_info);
2769 	kfree_const(hdev->fw_info);
2770 
2771 	destroy_workqueue(hdev->workqueue);
2772 	destroy_workqueue(hdev->req_workqueue);
2773 
2774 	hci_dev_lock(hdev);
2775 	hci_bdaddr_list_clear(&hdev->reject_list);
2776 	hci_bdaddr_list_clear(&hdev->accept_list);
2777 	hci_uuids_clear(hdev);
2778 	hci_link_keys_clear(hdev);
2779 	hci_smp_ltks_clear(hdev);
2780 	hci_smp_irks_clear(hdev);
2781 	hci_remote_oob_data_clear(hdev);
2782 	hci_adv_instances_clear(hdev);
2783 	hci_adv_monitors_clear(hdev);
2784 	hci_bdaddr_list_clear(&hdev->le_accept_list);
2785 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2786 	hci_conn_params_clear_all(hdev);
2787 	hci_discovery_filter_clear(hdev);
2788 	hci_blocked_keys_clear(hdev);
2789 	hci_codec_list_clear(&hdev->local_codecs);
2790 	hci_dev_unlock(hdev);
2791 
2792 	ida_simple_remove(&hci_index_ida, hdev->id);
2793 	kfree_skb(hdev->sent_cmd);
2794 	kfree_skb(hdev->recv_event);
2795 	kfree(hdev);
2796 }
2797 EXPORT_SYMBOL(hci_release_dev);
2798 
hci_register_suspend_notifier(struct hci_dev * hdev)2799 int hci_register_suspend_notifier(struct hci_dev *hdev)
2800 {
2801 	int ret = 0;
2802 
2803 	if (!hdev->suspend_notifier.notifier_call &&
2804 	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2805 		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2806 		ret = register_pm_notifier(&hdev->suspend_notifier);
2807 	}
2808 
2809 	return ret;
2810 }
2811 
hci_unregister_suspend_notifier(struct hci_dev * hdev)2812 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2813 {
2814 	int ret = 0;
2815 
2816 	if (hdev->suspend_notifier.notifier_call) {
2817 		ret = unregister_pm_notifier(&hdev->suspend_notifier);
2818 		if (!ret)
2819 			hdev->suspend_notifier.notifier_call = NULL;
2820 	}
2821 
2822 	return ret;
2823 }
2824 
2825 /* Suspend HCI device */
hci_suspend_dev(struct hci_dev * hdev)2826 int hci_suspend_dev(struct hci_dev *hdev)
2827 {
2828 	int ret;
2829 
2830 	bt_dev_dbg(hdev, "");
2831 
2832 	/* Suspend should only act when the device is powered. */
2833 	if (!hdev_is_powered(hdev) ||
2834 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2835 		return 0;
2836 
2837 	/* If powering down don't attempt to suspend */
2838 	if (mgmt_powering_down(hdev))
2839 		return 0;
2840 
2841 	/* Cancel potentially blocking sync operation before suspend */
2842 	__hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
2843 
2844 	hci_req_sync_lock(hdev);
2845 	ret = hci_suspend_sync(hdev);
2846 	hci_req_sync_unlock(hdev);
2847 
2848 	hci_clear_wake_reason(hdev);
2849 	mgmt_suspending(hdev, hdev->suspend_state);
2850 
2851 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2852 	return ret;
2853 }
2854 EXPORT_SYMBOL(hci_suspend_dev);
2855 
2856 /* Resume HCI device */
hci_resume_dev(struct hci_dev * hdev)2857 int hci_resume_dev(struct hci_dev *hdev)
2858 {
2859 	int ret;
2860 
2861 	bt_dev_dbg(hdev, "");
2862 
2863 	/* Resume should only act when the device is powered. */
2864 	if (!hdev_is_powered(hdev) ||
2865 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2866 		return 0;
2867 
2868 	/* If powering down don't attempt to resume */
2869 	if (mgmt_powering_down(hdev))
2870 		return 0;
2871 
2872 	hci_req_sync_lock(hdev);
2873 	ret = hci_resume_sync(hdev);
2874 	hci_req_sync_unlock(hdev);
2875 
2876 	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2877 		      hdev->wake_addr_type);
2878 
2879 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2880 	return ret;
2881 }
2882 EXPORT_SYMBOL(hci_resume_dev);
2883 
2884 /* Reset HCI device */
hci_reset_dev(struct hci_dev * hdev)2885 int hci_reset_dev(struct hci_dev *hdev)
2886 {
2887 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2888 	struct sk_buff *skb;
2889 
2890 	skb = bt_skb_alloc(3, GFP_ATOMIC);
2891 	if (!skb)
2892 		return -ENOMEM;
2893 
2894 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2895 	skb_put_data(skb, hw_err, 3);
2896 
2897 	bt_dev_err(hdev, "Injecting HCI hardware error event");
2898 
2899 	/* Send Hardware Error to upper stack */
2900 	return hci_recv_frame(hdev, skb);
2901 }
2902 EXPORT_SYMBOL(hci_reset_dev);
2903 
2904 /* Receive frame from HCI drivers */
hci_recv_frame(struct hci_dev * hdev,struct sk_buff * skb)2905 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2906 {
2907 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2908 		      && !test_bit(HCI_INIT, &hdev->flags))) {
2909 		kfree_skb(skb);
2910 		return -ENXIO;
2911 	}
2912 
2913 	switch (hci_skb_pkt_type(skb)) {
2914 	case HCI_EVENT_PKT:
2915 		break;
2916 	case HCI_ACLDATA_PKT:
2917 		/* Detect if ISO packet has been sent as ACL */
2918 		if (hci_conn_num(hdev, ISO_LINK)) {
2919 			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2920 			__u8 type;
2921 
2922 			type = hci_conn_lookup_type(hdev, hci_handle(handle));
2923 			if (type == ISO_LINK)
2924 				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2925 		}
2926 		break;
2927 	case HCI_SCODATA_PKT:
2928 		break;
2929 	case HCI_ISODATA_PKT:
2930 		break;
2931 	default:
2932 		kfree_skb(skb);
2933 		return -EINVAL;
2934 	}
2935 
2936 	/* Incoming skb */
2937 	bt_cb(skb)->incoming = 1;
2938 
2939 	/* Time stamp */
2940 	__net_timestamp(skb);
2941 
2942 	skb_queue_tail(&hdev->rx_q, skb);
2943 	queue_work(hdev->workqueue, &hdev->rx_work);
2944 
2945 	return 0;
2946 }
2947 EXPORT_SYMBOL(hci_recv_frame);
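
/* Illustrative sketch (not compiled): how a transport driver typically feeds
 * a received HCI event into the core using hci_recv_frame(). The buffer and
 * length are assumed to come from the hypothetical transport layer.
 *
 *	struct sk_buff *skb;
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, count);
 *
 *	return hci_recv_frame(hdev, skb);
 */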
2948 
2949 /* Receive diagnostic message from HCI drivers */
hci_recv_diag(struct hci_dev * hdev,struct sk_buff * skb)2950 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2951 {
2952 	/* Mark as diagnostic packet */
2953 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2954 
2955 	/* Time stamp */
2956 	__net_timestamp(skb);
2957 
2958 	skb_queue_tail(&hdev->rx_q, skb);
2959 	queue_work(hdev->workqueue, &hdev->rx_work);
2960 
2961 	return 0;
2962 }
2963 EXPORT_SYMBOL(hci_recv_diag);
2964 
hci_set_hw_info(struct hci_dev * hdev,const char * fmt,...)2965 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2966 {
2967 	va_list vargs;
2968 
2969 	va_start(vargs, fmt);
2970 	kfree_const(hdev->hw_info);
2971 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2972 	va_end(vargs);
2973 }
2974 EXPORT_SYMBOL(hci_set_hw_info);
2975 
hci_set_fw_info(struct hci_dev * hdev,const char * fmt,...)2976 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2977 {
2978 	va_list vargs;
2979 
2980 	va_start(vargs, fmt);
2981 	kfree_const(hdev->fw_info);
2982 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2983 	va_end(vargs);
2984 }
2985 EXPORT_SYMBOL(hci_set_fw_info);
2986 
2987 /* ---- Interface to upper protocols ---- */
2988 
hci_register_cb(struct hci_cb * cb)2989 int hci_register_cb(struct hci_cb *cb)
2990 {
2991 	BT_DBG("%p name %s", cb, cb->name);
2992 
2993 	mutex_lock(&hci_cb_list_lock);
2994 	list_add_tail(&cb->list, &hci_cb_list);
2995 	mutex_unlock(&hci_cb_list_lock);
2996 
2997 	return 0;
2998 }
2999 EXPORT_SYMBOL(hci_register_cb);
3000 
hci_unregister_cb(struct hci_cb * cb)3001 int hci_unregister_cb(struct hci_cb *cb)
3002 {
3003 	BT_DBG("%p name %s", cb, cb->name);
3004 
3005 	mutex_lock(&hci_cb_list_lock);
3006 	list_del(&cb->list);
3007 	mutex_unlock(&hci_cb_list_lock);
3008 
3009 	return 0;
3010 }
3011 EXPORT_SYMBOL(hci_unregister_cb);
3012 
hci_send_frame(struct hci_dev * hdev,struct sk_buff * skb)3013 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3014 {
3015 	int err;
3016 
3017 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3018 	       skb->len);
3019 
3020 	/* Time stamp */
3021 	__net_timestamp(skb);
3022 
3023 	/* Send copy to monitor */
3024 	hci_send_to_monitor(hdev, skb);
3025 
3026 	if (atomic_read(&hdev->promisc)) {
3027 		/* Send copy to the sockets */
3028 		hci_send_to_sock(hdev, skb);
3029 	}
3030 
3031 	/* Get rid of skb owner, prior to sending to the driver. */
3032 	skb_orphan(skb);
3033 
3034 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3035 		kfree_skb(skb);
3036 		return -EINVAL;
3037 	}
3038 
3039 	err = hdev->send(hdev, skb);
3040 	if (err < 0) {
3041 		bt_dev_err(hdev, "sending frame failed (%d)", err);
3042 		kfree_skb(skb);
3043 		return err;
3044 	}
3045 
3046 	return 0;
3047 }
3048 
3049 /* Send HCI command */
hci_send_cmd(struct hci_dev * hdev,__u16 opcode,__u32 plen,const void * param)3050 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3051 		 const void *param)
3052 {
3053 	struct sk_buff *skb;
3054 
3055 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3056 
3057 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3058 	if (!skb) {
3059 		bt_dev_err(hdev, "no memory for command");
3060 		return -ENOMEM;
3061 	}
3062 
3063 	/* Stand-alone HCI commands must be flagged as
3064 	 * single-command requests.
3065 	 */
3066 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3067 
3068 	skb_queue_tail(&hdev->cmd_q, skb);
3069 	queue_work(hdev->workqueue, &hdev->cmd_work);
3070 
3071 	return 0;
3072 }
3073 
__hci_cmd_send(struct hci_dev * hdev,u16 opcode,u32 plen,const void * param)3074 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3075 		   const void *param)
3076 {
3077 	struct sk_buff *skb;
3078 
3079 	if (hci_opcode_ogf(opcode) != 0x3f) {
3080 		/* A controller receiving a command shall respond with either
3081 		 * a Command Status Event or a Command Complete Event.
3082 		 * Therefore, all standard HCI commands must be sent via the
3083 		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3084 		 * Some vendors do not comply with this rule for vendor-specific
3085 		 * commands and do not return any event. We want to support
3086 		 * unresponded commands for such cases only.
3087 		 */
3088 		bt_dev_err(hdev, "unresponded command not supported");
3089 		return -EINVAL;
3090 	}
3091 
3092 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3093 	if (!skb) {
3094 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3095 			   opcode);
3096 		return -ENOMEM;
3097 	}
3098 
3099 	hci_send_frame(hdev, skb);
3100 
3101 	return 0;
3102 }
3103 EXPORT_SYMBOL(__hci_cmd_send);
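
/* Illustrative sketch (not compiled): sending a vendor-specific command that
 * produces no completion event, which is the only case accepted by
 * __hci_cmd_send() (OGF 0x3f). The OCF and parameter here are hypothetical.
 *
 *	u8 param = 0x01;
 *
 *	return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			      sizeof(param), &param);
 */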
3104 
3105 /* Get data from the previously sent command */
hci_sent_cmd_data(struct hci_dev * hdev,__u16 opcode)3106 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3107 {
3108 	struct hci_command_hdr *hdr;
3109 
3110 	if (!hdev->sent_cmd)
3111 		return NULL;
3112 
3113 	hdr = (void *) hdev->sent_cmd->data;
3114 
3115 	if (hdr->opcode != cpu_to_le16(opcode))
3116 		return NULL;
3117 
3118 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3119 
3120 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3121 }
3122 
3123 /* Get data from last received event */
hci_recv_event_data(struct hci_dev * hdev,__u8 event)3124 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3125 {
3126 	struct hci_event_hdr *hdr;
3127 	int offset;
3128 
3129 	if (!hdev->recv_event)
3130 		return NULL;
3131 
3132 	hdr = (void *)hdev->recv_event->data;
3133 	offset = sizeof(*hdr);
3134 
3135 	if (hdr->evt != event) {
3136 		/* For an LE meta event, check whether the subevent matches */
3137 		if (hdr->evt == HCI_EV_LE_META) {
3138 			struct hci_ev_le_meta *ev;
3139 
3140 			ev = (void *)hdev->recv_event->data + offset;
3141 			offset += sizeof(*ev);
3142 			if (ev->subevent == event)
3143 				goto found;
3144 		}
3145 		return NULL;
3146 	}
3147 
3148 found:
3149 	bt_dev_dbg(hdev, "event 0x%2.2x", event);
3150 
3151 	return hdev->recv_event->data + offset;
3152 }
3153 
3154 /* Send ACL data */
hci_add_acl_hdr(struct sk_buff * skb,__u16 handle,__u16 flags)3155 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3156 {
3157 	struct hci_acl_hdr *hdr;
3158 	int len = skb->len;
3159 
3160 	skb_push(skb, HCI_ACL_HDR_SIZE);
3161 	skb_reset_transport_header(skb);
3162 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3163 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3164 	hdr->dlen   = cpu_to_le16(len);
3165 }
3166 
hci_queue_acl(struct hci_chan * chan,struct sk_buff_head * queue,struct sk_buff * skb,__u16 flags)3167 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3168 			  struct sk_buff *skb, __u16 flags)
3169 {
3170 	struct hci_conn *conn = chan->conn;
3171 	struct hci_dev *hdev = conn->hdev;
3172 	struct sk_buff *list;
3173 
3174 	skb->len = skb_headlen(skb);
3175 	skb->data_len = 0;
3176 
3177 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3178 
3179 	switch (hdev->dev_type) {
3180 	case HCI_PRIMARY:
3181 		hci_add_acl_hdr(skb, conn->handle, flags);
3182 		break;
3183 	case HCI_AMP:
3184 		hci_add_acl_hdr(skb, chan->handle, flags);
3185 		break;
3186 	default:
3187 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3188 		return;
3189 	}
3190 
3191 	list = skb_shinfo(skb)->frag_list;
3192 	if (!list) {
3193 		/* Non fragmented */
3194 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3195 
3196 		skb_queue_tail(queue, skb);
3197 	} else {
3198 		/* Fragmented */
3199 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3200 
3201 		skb_shinfo(skb)->frag_list = NULL;
3202 
3203 		/* Queue all fragments atomically. We need to use spin_lock_bh
3204 		 * here because of 6LoWPAN links, as there this function is
3205 		 * called from softirq and using normal spin lock could cause
3206 		 * deadlocks.
3207 		 */
3208 		spin_lock_bh(&queue->lock);
3209 
3210 		__skb_queue_tail(queue, skb);
3211 
3212 		flags &= ~ACL_START;
3213 		flags |= ACL_CONT;
3214 		do {
3215 			skb = list; list = list->next;
3216 
3217 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3218 			hci_add_acl_hdr(skb, conn->handle, flags);
3219 
3220 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3221 
3222 			__skb_queue_tail(queue, skb);
3223 		} while (list);
3224 
3225 		spin_unlock_bh(&queue->lock);
3226 	}
3227 }
3228 
hci_send_acl(struct hci_chan * chan,struct sk_buff * skb,__u16 flags)3229 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3230 {
3231 	struct hci_dev *hdev = chan->conn->hdev;
3232 
3233 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3234 
3235 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3236 
3237 	queue_work(hdev->workqueue, &hdev->tx_work);
3238 }
3239 
3240 /* Send SCO data */
hci_send_sco(struct hci_conn * conn,struct sk_buff * skb)3241 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3242 {
3243 	struct hci_dev *hdev = conn->hdev;
3244 	struct hci_sco_hdr hdr;
3245 
3246 	BT_DBG("%s len %d", hdev->name, skb->len);
3247 
3248 	hdr.handle = cpu_to_le16(conn->handle);
3249 	hdr.dlen   = skb->len;
3250 
3251 	skb_push(skb, HCI_SCO_HDR_SIZE);
3252 	skb_reset_transport_header(skb);
3253 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3254 
3255 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3256 
3257 	skb_queue_tail(&conn->data_q, skb);
3258 	queue_work(hdev->workqueue, &hdev->tx_work);
3259 }
3260 
3261 /* Send ISO data */
hci_add_iso_hdr(struct sk_buff * skb,__u16 handle,__u8 flags)3262 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3263 {
3264 	struct hci_iso_hdr *hdr;
3265 	int len = skb->len;
3266 
3267 	skb_push(skb, HCI_ISO_HDR_SIZE);
3268 	skb_reset_transport_header(skb);
3269 	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3270 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3271 	hdr->dlen   = cpu_to_le16(len);
3272 }
3273 
hci_queue_iso(struct hci_conn * conn,struct sk_buff_head * queue,struct sk_buff * skb)3274 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3275 			  struct sk_buff *skb)
3276 {
3277 	struct hci_dev *hdev = conn->hdev;
3278 	struct sk_buff *list;
3279 	__u16 flags;
3280 
3281 	skb->len = skb_headlen(skb);
3282 	skb->data_len = 0;
3283 
3284 	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3285 
3286 	list = skb_shinfo(skb)->frag_list;
3287 
3288 	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3289 	hci_add_iso_hdr(skb, conn->handle, flags);
3290 
3291 	if (!list) {
3292 		/* Non fragmented */
3293 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3294 
3295 		skb_queue_tail(queue, skb);
3296 	} else {
3297 		/* Fragmented */
3298 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3299 
3300 		skb_shinfo(skb)->frag_list = NULL;
3301 
3302 		__skb_queue_tail(queue, skb);
3303 
3304 		do {
3305 			skb = list; list = list->next;
3306 
3307 			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3308 			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3309 						   0x00);
3310 			hci_add_iso_hdr(skb, conn->handle, flags);
3311 
3312 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3313 
3314 			__skb_queue_tail(queue, skb);
3315 		} while (list);
3316 	}
3317 }
3318 
hci_send_iso(struct hci_conn * conn,struct sk_buff * skb)3319 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3320 {
3321 	struct hci_dev *hdev = conn->hdev;
3322 
3323 	BT_DBG("%s len %d", hdev->name, skb->len);
3324 
3325 	hci_queue_iso(conn, &conn->data_q, skb);
3326 
3327 	queue_work(hdev->workqueue, &hdev->tx_work);
3328 }
3329 
3330 /* ---- HCI TX task (outgoing data) ---- */
3331 
3332 /* HCI Connection scheduler */
hci_quote_sent(struct hci_conn * conn,int num,int * quote)3333 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3334 {
3335 	struct hci_dev *hdev;
3336 	int cnt, q;
3337 
3338 	if (!conn) {
3339 		*quote = 0;
3340 		return;
3341 	}
3342 
3343 	hdev = conn->hdev;
3344 
3345 	switch (conn->type) {
3346 	case ACL_LINK:
3347 		cnt = hdev->acl_cnt;
3348 		break;
3349 	case AMP_LINK:
3350 		cnt = hdev->block_cnt;
3351 		break;
3352 	case SCO_LINK:
3353 	case ESCO_LINK:
3354 		cnt = hdev->sco_cnt;
3355 		break;
3356 	case LE_LINK:
3357 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3358 		break;
3359 	case ISO_LINK:
3360 		cnt = hdev->iso_mtu ? hdev->iso_cnt :
3361 			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3362 		break;
3363 	default:
3364 		cnt = 0;
3365 		bt_dev_err(hdev, "unknown link type %d", conn->type);
3366 	}
3367 
3368 	q = cnt / num;
3369 	*quote = q ? q : 1;
3370 }
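
/* Worked example of the quota computation above (illustrative): with
 * cnt = 8 free ACL buffers shared by num = 3 connections that have queued
 * data, q = 8 / 3 = 2, so the selected connection may send up to two packets
 * in this scheduling pass. When cnt is smaller than num the integer division
 * yields 0 and the quote is clamped to 1, so the selected connection is never
 * starved completely.
 */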
3371 
hci_low_sent(struct hci_dev * hdev,__u8 type,int * quote)3372 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3373 				     int *quote)
3374 {
3375 	struct hci_conn_hash *h = &hdev->conn_hash;
3376 	struct hci_conn *conn = NULL, *c;
3377 	unsigned int num = 0, min = ~0;
3378 
3379 	/* We don't have to lock the device here. Connections are always
3380 	 * added and removed with the TX task disabled. */
3381 
3382 	rcu_read_lock();
3383 
3384 	list_for_each_entry_rcu(c, &h->list, list) {
3385 		if (c->type != type || skb_queue_empty(&c->data_q))
3386 			continue;
3387 
3388 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3389 			continue;
3390 
3391 		num++;
3392 
3393 		if (c->sent < min) {
3394 			min  = c->sent;
3395 			conn = c;
3396 		}
3397 
3398 		if (hci_conn_num(hdev, type) == num)
3399 			break;
3400 	}
3401 
3402 	rcu_read_unlock();
3403 
3404 	hci_quote_sent(conn, num, quote);
3405 
3406 	BT_DBG("conn %p quote %d", conn, *quote);
3407 	return conn;
3408 }
3409 
hci_link_tx_to(struct hci_dev * hdev,__u8 type)3410 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3411 {
3412 	struct hci_conn_hash *h = &hdev->conn_hash;
3413 	struct hci_conn *c;
3414 
3415 	bt_dev_err(hdev, "link tx timeout");
3416 
3417 	rcu_read_lock();
3418 
3419 	/* Kill stalled connections */
3420 	list_for_each_entry_rcu(c, &h->list, list) {
3421 		if (c->type == type && c->sent) {
3422 			bt_dev_err(hdev, "killing stalled connection %pMR",
3423 				   &c->dst);
3424 			/* hci_disconnect might sleep, so, we have to release
3425 			 * the RCU read lock before calling it.
3426 			 */
3427 			rcu_read_unlock();
3428 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3429 			rcu_read_lock();
3430 		}
3431 	}
3432 
3433 	rcu_read_unlock();
3434 }
3435 
hci_chan_sent(struct hci_dev * hdev,__u8 type,int * quote)3436 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3437 				      int *quote)
3438 {
3439 	struct hci_conn_hash *h = &hdev->conn_hash;
3440 	struct hci_chan *chan = NULL;
3441 	unsigned int num = 0, min = ~0, cur_prio = 0;
3442 	struct hci_conn *conn;
3443 	int conn_num = 0;
3444 
3445 	BT_DBG("%s", hdev->name);
3446 
3447 	rcu_read_lock();
3448 
3449 	list_for_each_entry_rcu(conn, &h->list, list) {
3450 		struct hci_chan *tmp;
3451 
3452 		if (conn->type != type)
3453 			continue;
3454 
3455 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3456 			continue;
3457 
3458 		conn_num++;
3459 
3460 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3461 			struct sk_buff *skb;
3462 
3463 			if (skb_queue_empty(&tmp->data_q))
3464 				continue;
3465 
3466 			skb = skb_peek(&tmp->data_q);
3467 			if (skb->priority < cur_prio)
3468 				continue;
3469 
3470 			if (skb->priority > cur_prio) {
3471 				num = 0;
3472 				min = ~0;
3473 				cur_prio = skb->priority;
3474 			}
3475 
3476 			num++;
3477 
3478 			if (conn->sent < min) {
3479 				min  = conn->sent;
3480 				chan = tmp;
3481 			}
3482 		}
3483 
3484 		if (hci_conn_num(hdev, type) == conn_num)
3485 			break;
3486 	}
3487 
3488 	rcu_read_unlock();
3489 
3490 	if (!chan)
3491 		return NULL;
3492 
3493 	hci_quote_sent(chan->conn, num, quote);
3494 
3495 	BT_DBG("chan %p quote %d", chan, *quote);
3496 	return chan;
3497 }
3498 
hci_prio_recalculate(struct hci_dev * hdev,__u8 type)3499 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3500 {
3501 	struct hci_conn_hash *h = &hdev->conn_hash;
3502 	struct hci_conn *conn;
3503 	int num = 0;
3504 
3505 	BT_DBG("%s", hdev->name);
3506 
3507 	rcu_read_lock();
3508 
3509 	list_for_each_entry_rcu(conn, &h->list, list) {
3510 		struct hci_chan *chan;
3511 
3512 		if (conn->type != type)
3513 			continue;
3514 
3515 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3516 			continue;
3517 
3518 		num++;
3519 
3520 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3521 			struct sk_buff *skb;
3522 
3523 			if (chan->sent) {
3524 				chan->sent = 0;
3525 				continue;
3526 			}
3527 
3528 			if (skb_queue_empty(&chan->data_q))
3529 				continue;
3530 
3531 			skb = skb_peek(&chan->data_q);
3532 			if (skb->priority >= HCI_PRIO_MAX - 1)
3533 				continue;
3534 
3535 			skb->priority = HCI_PRIO_MAX - 1;
3536 
3537 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3538 			       skb->priority);
3539 		}
3540 
3541 		if (hci_conn_num(hdev, type) == num)
3542 			break;
3543 	}
3544 
3545 	rcu_read_unlock();
3546 
3547 }
3548 
__get_blocks(struct hci_dev * hdev,struct sk_buff * skb)3549 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3550 {
3551 	/* Calculate count of blocks used by this packet */
3552 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3553 }
3554 
__check_timeout(struct hci_dev * hdev,unsigned int cnt,u8 type)3555 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3556 {
3557 	unsigned long last_tx;
3558 
3559 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3560 		return;
3561 
3562 	switch (type) {
3563 	case LE_LINK:
3564 		last_tx = hdev->le_last_tx;
3565 		break;
3566 	default:
3567 		last_tx = hdev->acl_last_tx;
3568 		break;
3569 	}
3570 
3571 	/* The TX timeout must be longer than the maximum link supervision
3572 	 * timeout (40.9 seconds).
3573 	 */
3574 	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3575 		hci_link_tx_to(hdev, type);
3576 }
3577 
3578 /* Schedule SCO */
hci_sched_sco(struct hci_dev * hdev)3579 static void hci_sched_sco(struct hci_dev *hdev)
3580 {
3581 	struct hci_conn *conn;
3582 	struct sk_buff *skb;
3583 	int quote;
3584 
3585 	BT_DBG("%s", hdev->name);
3586 
3587 	if (!hci_conn_num(hdev, SCO_LINK))
3588 		return;
3589 
3590 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3591 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3592 			BT_DBG("skb %p len %d", skb, skb->len);
3593 			hci_send_frame(hdev, skb);
3594 
3595 			conn->sent++;
3596 			if (conn->sent == ~0)
3597 				conn->sent = 0;
3598 		}
3599 	}
3600 }
3601 
hci_sched_esco(struct hci_dev * hdev)3602 static void hci_sched_esco(struct hci_dev *hdev)
3603 {
3604 	struct hci_conn *conn;
3605 	struct sk_buff *skb;
3606 	int quote;
3607 
3608 	BT_DBG("%s", hdev->name);
3609 
3610 	if (!hci_conn_num(hdev, ESCO_LINK))
3611 		return;
3612 
3613 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3614 						     &quote))) {
3615 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3616 			BT_DBG("skb %p len %d", skb, skb->len);
3617 			hci_send_frame(hdev, skb);
3618 
3619 			conn->sent++;
3620 			if (conn->sent == ~0)
3621 				conn->sent = 0;
3622 		}
3623 	}
3624 }
3625 
hci_sched_acl_pkt(struct hci_dev * hdev)3626 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3627 {
3628 	unsigned int cnt = hdev->acl_cnt;
3629 	struct hci_chan *chan;
3630 	struct sk_buff *skb;
3631 	int quote;
3632 
3633 	__check_timeout(hdev, cnt, ACL_LINK);
3634 
3635 	while (hdev->acl_cnt &&
3636 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3637 		u32 priority = (skb_peek(&chan->data_q))->priority;
3638 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3639 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3640 			       skb->len, skb->priority);
3641 
3642 			/* Stop if priority has changed */
3643 			if (skb->priority < priority)
3644 				break;
3645 
3646 			skb = skb_dequeue(&chan->data_q);
3647 
3648 			hci_conn_enter_active_mode(chan->conn,
3649 						   bt_cb(skb)->force_active);
3650 
3651 			hci_send_frame(hdev, skb);
3652 			hdev->acl_last_tx = jiffies;
3653 
3654 			hdev->acl_cnt--;
3655 			chan->sent++;
3656 			chan->conn->sent++;
3657 
3658 			/* Send pending SCO packets right away */
3659 			hci_sched_sco(hdev);
3660 			hci_sched_esco(hdev);
3661 		}
3662 	}
3663 
3664 	if (cnt != hdev->acl_cnt)
3665 		hci_prio_recalculate(hdev, ACL_LINK);
3666 }
3667 
hci_sched_acl_blk(struct hci_dev * hdev)3668 static void hci_sched_acl_blk(struct hci_dev *hdev)
3669 {
3670 	unsigned int cnt = hdev->block_cnt;
3671 	struct hci_chan *chan;
3672 	struct sk_buff *skb;
3673 	int quote;
3674 	u8 type;
3675 
3676 	BT_DBG("%s", hdev->name);
3677 
3678 	if (hdev->dev_type == HCI_AMP)
3679 		type = AMP_LINK;
3680 	else
3681 		type = ACL_LINK;
3682 
3683 	__check_timeout(hdev, cnt, type);
3684 
3685 	while (hdev->block_cnt > 0 &&
3686 	       (chan = hci_chan_sent(hdev, type, &quote))) {
3687 		u32 priority = (skb_peek(&chan->data_q))->priority;
3688 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3689 			int blocks;
3690 
3691 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3692 			       skb->len, skb->priority);
3693 
3694 			/* Stop if priority has changed */
3695 			if (skb->priority < priority)
3696 				break;
3697 
3698 			skb = skb_dequeue(&chan->data_q);
3699 
3700 			blocks = __get_blocks(hdev, skb);
3701 			if (blocks > hdev->block_cnt)
3702 				return;
3703 
3704 			hci_conn_enter_active_mode(chan->conn,
3705 						   bt_cb(skb)->force_active);
3706 
3707 			hci_send_frame(hdev, skb);
3708 			hdev->acl_last_tx = jiffies;
3709 
3710 			hdev->block_cnt -= blocks;
3711 			quote -= blocks;
3712 
3713 			chan->sent += blocks;
3714 			chan->conn->sent += blocks;
3715 		}
3716 	}
3717 
3718 	if (cnt != hdev->block_cnt)
3719 		hci_prio_recalculate(hdev, type);
3720 }
3721 
hci_sched_acl(struct hci_dev * hdev)3722 static void hci_sched_acl(struct hci_dev *hdev)
3723 {
3724 	BT_DBG("%s", hdev->name);
3725 
3726 	/* No ACL link over BR/EDR controller */
3727 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3728 		return;
3729 
3730 	/* No AMP link over AMP controller */
3731 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3732 		return;
3733 
3734 	switch (hdev->flow_ctl_mode) {
3735 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
3736 		hci_sched_acl_pkt(hdev);
3737 		break;
3738 
3739 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3740 		hci_sched_acl_blk(hdev);
3741 		break;
3742 	}
3743 }
3744 
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt, LE_LINK);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

/* Schedule CIS */
static void hci_sched_iso(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, *cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ISO_LINK))
		return;

	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
			(*cnt)--;
		}
	}
}

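/* TX work: schedule the SCO, eSCO, ISO, ACL and LE queues (unless the
 * device is held by the user channel) and then push any queued raw
 * packets straight to the driver.
 */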
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send data to the HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_iso(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send queued raw (unknown type) packets */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		hci_skb_pkt_status(skb) = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
				       handle);
	}

	kfree_skb(skb);
}

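/* ISO data packet */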
static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_iso_hdr *hdr;
	struct hci_conn *conn;
	__u16 handle, flags;

	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "ISO packet too small");
		goto drop;
	}

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (!conn) {
		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
			   handle);
		goto drop;
	}

	/* Send to upper protocol */
	iso_recv(conn, skb, flags);
	return;

drop:
	kfree_skb(skb);
}

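/* A request is complete once the head of the command queue starts a new
 * request (HCI_REQ_START) or the queue has been drained.
 */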
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

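/* Put a copy of the last sent command back at the head of the command
 * queue so that it gets sent again. A reset is never resent.
 */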
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

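/* Called when a command completes: look up the completion callback of the
 * request the command belongs to and, once the request has finished,
 * remove any of its commands still sitting in the command queue.
 */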
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR-based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend the last sent command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the completion
	 * callback is found in hdev->sent_cmd instead of the command
	 * queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		dev_kfree_skb_irq(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

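/* RX work: deliver incoming packets to the monitor and socket layers and
 * then dispatch them by packet type to the event, ACL, SCO and ISO
 * handlers.
 */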
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions are used to collect packet parsing
	 * coverage information from this background thread and to
	 * associate it with the thread of the syscall that originally
	 * injected the packet. This helps fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device. While
		 * the device is in HCI_INIT, we still need to process
		 * the incoming packets so that the driver's setup()
		 * can complete.
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

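/* Command work: send the next queued HCI command to the driver whenever
 * the controller has room for one (cmd_cnt > 0) and re-arm the command
 * timeout unless a reset or a workqueue drain is in progress.
 */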
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			int res;

			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);

			res = hci_send_frame(hdev, skb);
			if (res < 0)
				__hci_cmd_sync_cancel(hdev, -res);

			rcu_read_lock();
			if (test_bit(HCI_RESET, &hdev->flags) ||
			    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
						   HCI_CMD_TIMEOUT);
			rcu_read_unlock();
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}