1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <linux/property.h>
34 #include <asm/unaligned.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/mgmt.h>
40 
41 #include "hci_request.h"
42 #include "hci_debugfs.h"
43 #include "smp.h"
44 #include "leds.h"
45 
46 static void hci_rx_work(struct work_struct *work);
47 static void hci_cmd_work(struct work_struct *work);
48 static void hci_tx_work(struct work_struct *work);
49 
50 /* HCI device list */
51 LIST_HEAD(hci_dev_list);
52 DEFINE_RWLOCK(hci_dev_list_lock);
53 
54 /* HCI callback list */
55 LIST_HEAD(hci_cb_list);
56 DEFINE_MUTEX(hci_cb_list_lock);
57 
58 /* HCI ID Numbering */
59 static DEFINE_IDA(hci_index_ida);
60 
61 /* ---- HCI debugfs entries ---- */
62 
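/* debugfs attribute exposing Device Under Test (DUT) mode. Reading
 * reports 'Y' or 'N' depending on the HCI_DUT_MODE flag; writing a
 * boolean enables DUT mode via HCI_OP_ENABLE_DUT_MODE or leaves it
 * again by resetting the controller with HCI_OP_RESET.
 */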
63 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
64 			     size_t count, loff_t *ppos)
65 {
66 	struct hci_dev *hdev = file->private_data;
67 	char buf[3];
68 
69 	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
70 	buf[1] = '\n';
71 	buf[2] = '\0';
72 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
73 }
74 
75 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
76 			      size_t count, loff_t *ppos)
77 {
78 	struct hci_dev *hdev = file->private_data;
79 	struct sk_buff *skb;
80 	bool enable;
81 	int err;
82 
83 	if (!test_bit(HCI_UP, &hdev->flags))
84 		return -ENETDOWN;
85 
86 	err = kstrtobool_from_user(user_buf, count, &enable);
87 	if (err)
88 		return err;
89 
90 	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
91 		return -EALREADY;
92 
93 	hci_req_sync_lock(hdev);
94 	if (enable)
95 		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
96 				     HCI_CMD_TIMEOUT);
97 	else
98 		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
99 				     HCI_CMD_TIMEOUT);
100 	hci_req_sync_unlock(hdev);
101 
102 	if (IS_ERR(skb))
103 		return PTR_ERR(skb);
104 
105 	kfree_skb(skb);
106 
107 	hci_dev_change_flag(hdev, HCI_DUT_MODE);
108 
109 	return count;
110 }
111 
112 static const struct file_operations dut_mode_fops = {
113 	.open		= simple_open,
114 	.read		= dut_mode_read,
115 	.write		= dut_mode_write,
116 	.llseek		= default_llseek,
117 };
118 
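/* debugfs attribute for vendor specific diagnostic logging. Writing a
 * boolean calls the driver's set_diag() callback, unless the setting is
 * non-persistent and the transport is not active (or in user channel
 * operation); in that case only the HCI_VENDOR_DIAG flag is updated and
 * the value is programmed at the next power on.
 */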
119 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
120 				size_t count, loff_t *ppos)
121 {
122 	struct hci_dev *hdev = file->private_data;
123 	char buf[3];
124 
125 	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
126 	buf[1] = '\n';
127 	buf[2] = '\0';
128 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
129 }
130 
131 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
132 				 size_t count, loff_t *ppos)
133 {
134 	struct hci_dev *hdev = file->private_data;
135 	bool enable;
136 	int err;
137 
138 	err = kstrtobool_from_user(user_buf, count, &enable);
139 	if (err)
140 		return err;
141 
142 	/* When the diagnostic flags are not persistent and the transport
143 	 * is not active or in user channel operation, then there is no need
144 	 * for the vendor callback. Instead just store the desired value and
145 	 * the setting will be programmed when the controller gets powered on.
146 	 */
147 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
148 	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
149 	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
150 		goto done;
151 
152 	hci_req_sync_lock(hdev);
153 	err = hdev->set_diag(hdev, enable);
154 	hci_req_sync_unlock(hdev);
155 
156 	if (err < 0)
157 		return err;
158 
159 done:
160 	if (enable)
161 		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
162 	else
163 		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
164 
165 	return count;
166 }
167 
168 static const struct file_operations vendor_diag_fops = {
169 	.open		= simple_open,
170 	.read		= vendor_diag_read,
171 	.write		= vendor_diag_write,
172 	.llseek		= default_llseek,
173 };
174 
175 static void hci_debugfs_create_basic(struct hci_dev *hdev)
176 {
177 	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
178 			    &dut_mode_fops);
179 
180 	if (hdev->set_diag)
181 		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
182 				    &vendor_diag_fops);
183 }
184 
185 static int hci_reset_req(struct hci_request *req, unsigned long opt)
186 {
187 	BT_DBG("%s %ld", req->hdev->name, opt);
188 
189 	/* Reset device */
190 	set_bit(HCI_RESET, &req->hdev->flags);
191 	hci_req_add(req, HCI_OP_RESET, 0, NULL);
192 	return 0;
193 }
194 
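/* Stage-1 init for BR/EDR capable (primary) controllers: select
 * packet-based flow control and issue the basic identity reads
 * (local features, local version, BD_ADDR). AMP controllers use
 * amp_init1() below instead.
 */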
195 static void bredr_init(struct hci_request *req)
196 {
197 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
198 
199 	/* Read Local Supported Features */
200 	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
201 
202 	/* Read Local Version */
203 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
204 
205 	/* Read BD Address */
206 	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
207 }
208 
209 static void amp_init1(struct hci_request *req)
210 {
211 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
212 
213 	/* Read Local Version */
214 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215 
216 	/* Read Local Supported Commands */
217 	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
218 
219 	/* Read Local AMP Info */
220 	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
221 
222 	/* Read Data Blk size */
223 	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
224 
225 	/* Read Flow Control Mode */
226 	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
227 
228 	/* Read Location Data */
229 	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
230 }
231 
232 static int amp_init2(struct hci_request *req)
233 {
234 	/* Read Local Supported Features. Not all AMP controllers
235 	 * support this so it's placed conditionally in the second
236 	 * stage init.
237 	 */
238 	if (req->hdev->commands[14] & 0x20)
239 		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
240 
241 	return 0;
242 }
243 
244 static int hci_init1_req(struct hci_request *req, unsigned long opt)
245 {
246 	struct hci_dev *hdev = req->hdev;
247 
248 	BT_DBG("%s %ld", hdev->name, opt);
249 
250 	/* Reset */
251 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
252 		hci_reset_req(req, 0);
253 
254 	switch (hdev->dev_type) {
255 	case HCI_PRIMARY:
256 		bredr_init(req);
257 		break;
258 	case HCI_AMP:
259 		amp_init1(req);
260 		break;
261 	default:
262 		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
263 		break;
264 	}
265 
266 	return 0;
267 }
268 
269 static void bredr_setup(struct hci_request *req)
270 {
271 	__le16 param;
272 	__u8 flt_type;
273 
274 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
275 	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
276 
277 	/* Read Class of Device */
278 	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
279 
280 	/* Read Local Name */
281 	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
282 
283 	/* Read Voice Setting */
284 	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
285 
286 	/* Read Number of Supported IAC */
287 	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
288 
289 	/* Read Current IAC LAP */
290 	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
291 
292 	/* Clear Event Filters */
293 	flt_type = HCI_FLT_CLEAR_ALL;
294 	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
295 
296 	/* Connection accept timeout ~20 secs */
297 	param = cpu_to_le16(0x7d00);
298 	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
299 }
300 
301 static void le_setup(struct hci_request *req)
302 {
303 	struct hci_dev *hdev = req->hdev;
304 
305 	/* Read LE Buffer Size */
306 	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
307 
308 	/* Read LE Local Supported Features */
309 	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
310 
311 	/* Read LE Supported States */
312 	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
313 
314 	/* LE-only controllers have LE implicitly enabled */
315 	if (!lmp_bredr_capable(hdev))
316 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
317 }
318 
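/* Build the 8 byte event mask sent with HCI_OP_SET_EVENT_MASK. The
 * default enables the common BR/EDR events; for LE-only controllers a
 * minimal mask is built instead, and individual bits are then added
 * based on the reported LMP features and supported commands.
 */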
319 static void hci_setup_event_mask(struct hci_request *req)
320 {
321 	struct hci_dev *hdev = req->hdev;
322 
323 	/* The second byte is 0xff instead of 0x9f (two reserved bits
324 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
325 	 * command otherwise.
326 	 */
327 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
328 
329 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
330 	 * any event mask for pre 1.2 devices.
331 	 */
332 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
333 		return;
334 
335 	if (lmp_bredr_capable(hdev)) {
336 		events[4] |= 0x01; /* Flow Specification Complete */
337 	} else {
338 		/* Use a different default for LE-only devices */
339 		memset(events, 0, sizeof(events));
340 		events[1] |= 0x20; /* Command Complete */
341 		events[1] |= 0x40; /* Command Status */
342 		events[1] |= 0x80; /* Hardware Error */
343 
344 		/* If the controller supports the Disconnect command, enable
345 		 * the corresponding event. In addition enable packet flow
346 		 * control related events.
347 		 */
348 		if (hdev->commands[0] & 0x20) {
349 			events[0] |= 0x10; /* Disconnection Complete */
350 			events[2] |= 0x04; /* Number of Completed Packets */
351 			events[3] |= 0x02; /* Data Buffer Overflow */
352 		}
353 
354 		/* If the controller supports the Read Remote Version
355 		 * Information command, enable the corresponding event.
356 		 */
357 		if (hdev->commands[2] & 0x80)
358 			events[1] |= 0x08; /* Read Remote Version Information
359 					    * Complete
360 					    */
361 
362 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
363 			events[0] |= 0x80; /* Encryption Change */
364 			events[5] |= 0x80; /* Encryption Key Refresh Complete */
365 		}
366 	}
367 
368 	if (lmp_inq_rssi_capable(hdev) ||
369 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
370 		events[4] |= 0x02; /* Inquiry Result with RSSI */
371 
372 	if (lmp_ext_feat_capable(hdev))
373 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
374 
375 	if (lmp_esco_capable(hdev)) {
376 		events[5] |= 0x08; /* Synchronous Connection Complete */
377 		events[5] |= 0x10; /* Synchronous Connection Changed */
378 	}
379 
380 	if (lmp_sniffsubr_capable(hdev))
381 		events[5] |= 0x20; /* Sniff Subrating */
382 
383 	if (lmp_pause_enc_capable(hdev))
384 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
385 
386 	if (lmp_ext_inq_capable(hdev))
387 		events[5] |= 0x40; /* Extended Inquiry Result */
388 
389 	if (lmp_no_flush_capable(hdev))
390 		events[7] |= 0x01; /* Enhanced Flush Complete */
391 
392 	if (lmp_lsto_capable(hdev))
393 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
394 
395 	if (lmp_ssp_capable(hdev)) {
396 		events[6] |= 0x01;	/* IO Capability Request */
397 		events[6] |= 0x02;	/* IO Capability Response */
398 		events[6] |= 0x04;	/* User Confirmation Request */
399 		events[6] |= 0x08;	/* User Passkey Request */
400 		events[6] |= 0x10;	/* Remote OOB Data Request */
401 		events[6] |= 0x20;	/* Simple Pairing Complete */
402 		events[7] |= 0x04;	/* User Passkey Notification */
403 		events[7] |= 0x08;	/* Keypress Notification */
404 		events[7] |= 0x10;	/* Remote Host Supported
405 					 * Features Notification
406 					 */
407 	}
408 
409 	if (lmp_le_capable(hdev))
410 		events[7] |= 0x20;	/* LE Meta-Event */
411 
412 	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
413 }
414 
415 static int hci_init2_req(struct hci_request *req, unsigned long opt)
416 {
417 	struct hci_dev *hdev = req->hdev;
418 
419 	if (hdev->dev_type == HCI_AMP)
420 		return amp_init2(req);
421 
422 	if (lmp_bredr_capable(hdev))
423 		bredr_setup(req);
424 	else
425 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
426 
427 	if (lmp_le_capable(hdev))
428 		le_setup(req);
429 
430 	/* All Bluetooth 1.2 and later controllers should support the
431 	 * HCI command for reading the local supported commands.
432 	 *
433 	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
434 	 * but do not have support for this command. If that is the case,
435 	 * the driver can quirk the behavior and skip reading the local
436 	 * supported commands.
437 	 */
438 	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
439 	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
440 		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
441 
442 	if (lmp_ssp_capable(hdev)) {
443 		/* When SSP is available, the host features page should
444 		 * be available as well. However, some controllers report
445 		 * max_page as 0 as long as SSP has not been enabled. To
446 		 * get proper debugging output, force max_page to at
447 		 * least 1.
448 		 */
449 		hdev->max_page = 0x01;
450 
451 		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
452 			u8 mode = 0x01;
453 
454 			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
455 				    sizeof(mode), &mode);
456 		} else {
457 			struct hci_cp_write_eir cp;
458 
459 			memset(hdev->eir, 0, sizeof(hdev->eir));
460 			memset(&cp, 0, sizeof(cp));
461 
462 			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
463 		}
464 	}
465 
466 	if (lmp_inq_rssi_capable(hdev) ||
467 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
468 		u8 mode;
469 
470 		/* If Extended Inquiry Result events are supported, then
471 		 * they are clearly preferred over Inquiry Result with RSSI
472 		 * events.
473 		 */
474 		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
475 
476 		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
477 	}
478 
479 	if (lmp_inq_tx_pwr_capable(hdev))
480 		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
481 
482 	if (lmp_ext_feat_capable(hdev)) {
483 		struct hci_cp_read_local_ext_features cp;
484 
485 		cp.page = 0x01;
486 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
487 			    sizeof(cp), &cp);
488 	}
489 
490 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
491 		u8 enable = 1;
492 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
493 			    &enable);
494 	}
495 
496 	return 0;
497 }
498 
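/* Program the default link policy derived from the supported LMP
 * features: role switch, hold mode, sniff mode and park state.
 */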
499 static void hci_setup_link_policy(struct hci_request *req)
500 {
501 	struct hci_dev *hdev = req->hdev;
502 	struct hci_cp_write_def_link_policy cp;
503 	u16 link_policy = 0;
504 
505 	if (lmp_rswitch_capable(hdev))
506 		link_policy |= HCI_LP_RSWITCH;
507 	if (lmp_hold_capable(hdev))
508 		link_policy |= HCI_LP_HOLD;
509 	if (lmp_sniff_capable(hdev))
510 		link_policy |= HCI_LP_SNIFF;
511 	if (lmp_park_capable(hdev))
512 		link_policy |= HCI_LP_PARK;
513 
514 	cp.policy = cpu_to_le16(link_policy);
515 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
516 }
517 
518 static void hci_set_le_support(struct hci_request *req)
519 {
520 	struct hci_dev *hdev = req->hdev;
521 	struct hci_cp_write_le_host_supported cp;
522 
523 	/* LE-only devices do not support explicit enablement */
524 	if (!lmp_bredr_capable(hdev))
525 		return;
526 
527 	memset(&cp, 0, sizeof(cp));
528 
529 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
530 		cp.le = 0x01;
531 		cp.simul = 0x00;
532 	}
533 
534 	if (cp.le != lmp_host_le_capable(hdev))
535 		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
536 			    &cp);
537 }
538 
539 static void hci_set_event_mask_page_2(struct hci_request *req)
540 {
541 	struct hci_dev *hdev = req->hdev;
542 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
543 	bool changed = false;
544 
545 	/* If Connectionless Slave Broadcast master role is supported,
546 	 * enable all necessary events for it.
547 	 */
548 	if (lmp_csb_master_capable(hdev)) {
549 		events[1] |= 0x40;	/* Triggered Clock Capture */
550 		events[1] |= 0x80;	/* Synchronization Train Complete */
551 		events[2] |= 0x10;	/* Slave Page Response Timeout */
552 		events[2] |= 0x20;	/* CSB Channel Map Change */
553 		changed = true;
554 	}
555 
556 	/* If Connectionless Slave Broadcast slave role is supported,
557 	 * enable all necessary events for it.
558 	 */
559 	if (lmp_csb_slave_capable(hdev)) {
560 		events[2] |= 0x01;	/* Synchronization Train Received */
561 		events[2] |= 0x02;	/* CSB Receive */
562 		events[2] |= 0x04;	/* CSB Timeout */
563 		events[2] |= 0x08;	/* Truncated Page Complete */
564 		changed = true;
565 	}
566 
567 	/* Enable Authenticated Payload Timeout Expired event if supported */
568 	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
569 		events[2] |= 0x80;
570 		changed = true;
571 	}
572 
573 	/* Some Broadcom based controllers indicate support for the Set Event
574 	 * Mask Page 2 command but then do not actually support it. Since
575 	 * the default value is all bits set to zero, the command is only
576 	 * required if the event mask has to be changed. In case no change
577 	 * to the event mask is needed, skip this command.
578 	 */
579 	if (changed)
580 		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
581 			    sizeof(events), events);
582 }
583 
584 static int hci_init3_req(struct hci_request *req, unsigned long opt)
585 {
586 	struct hci_dev *hdev = req->hdev;
587 	u8 p;
588 
589 	hci_setup_event_mask(req);
590 
591 	if (hdev->commands[6] & 0x20 &&
592 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
593 		struct hci_cp_read_stored_link_key cp;
594 
595 		bacpy(&cp.bdaddr, BDADDR_ANY);
596 		cp.read_all = 0x01;
597 		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
598 	}
599 
600 	if (hdev->commands[5] & 0x10)
601 		hci_setup_link_policy(req);
602 
603 	if (hdev->commands[8] & 0x01)
604 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
605 
606 	/* Some older Broadcom based Bluetooth 1.2 controllers do not
607 	 * support the Read Page Scan Type command. Check support for
608 	 * this command in the bit mask of supported commands.
609 	 */
610 	if (hdev->commands[13] & 0x01)
611 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
612 
613 	if (lmp_le_capable(hdev)) {
614 		u8 events[8];
615 
616 		memset(events, 0, sizeof(events));
617 
618 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
619 			events[0] |= 0x10;	/* LE Long Term Key Request */
620 
621 		/* If controller supports the Connection Parameters Request
622 		 * Link Layer Procedure, enable the corresponding event.
623 		 */
624 		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
625 			events[0] |= 0x20;	/* LE Remote Connection
626 						 * Parameter Request
627 						 */
628 
629 		/* If the controller supports the Data Length Extension
630 		 * feature, enable the corresponding event.
631 		 */
632 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
633 			events[0] |= 0x40;	/* LE Data Length Change */
634 
635 		/* If the controller supports Extended Scanner Filter
636 		 * Policies, enable the corresponding event.
637 		 */
638 		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
639 			events[1] |= 0x04;	/* LE Direct Advertising
640 						 * Report
641 						 */
642 
643 		/* If the controller supports Channel Selection Algorithm #2
644 		 * feature, enable the corresponding event.
645 		 */
646 		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
647 			events[2] |= 0x08;	/* LE Channel Selection
648 						 * Algorithm
649 						 */
650 
651 		/* If the controller supports the LE Set Scan Enable command,
652 		 * enable the corresponding advertising report event.
653 		 */
654 		if (hdev->commands[26] & 0x08)
655 			events[0] |= 0x02;	/* LE Advertising Report */
656 
657 		/* If the controller supports the LE Create Connection
658 		 * command, enable the corresponding event.
659 		 */
660 		if (hdev->commands[26] & 0x10)
661 			events[0] |= 0x01;	/* LE Connection Complete */
662 
663 		/* If the controller supports the LE Connection Update
664 		 * command, enable the corresponding event.
665 		 */
666 		if (hdev->commands[27] & 0x04)
667 			events[0] |= 0x04;	/* LE Connection Update
668 						 * Complete
669 						 */
670 
671 		/* If the controller supports the LE Read Remote Used Features
672 		 * command, enable the corresponding event.
673 		 */
674 		if (hdev->commands[27] & 0x20)
675 			events[0] |= 0x08;	/* LE Read Remote Used
676 						 * Features Complete
677 						 */
678 
679 		/* If the controller supports the LE Read Local P-256
680 		 * Public Key command, enable the corresponding event.
681 		 */
682 		if (hdev->commands[34] & 0x02)
683 			events[0] |= 0x80;	/* LE Read Local P-256
684 						 * Public Key Complete
685 						 */
686 
687 		/* If the controller supports the LE Generate DHKey
688 		 * command, enable the corresponding event.
689 		 */
690 		if (hdev->commands[34] & 0x04)
691 			events[1] |= 0x01;	/* LE Generate DHKey Complete */
692 
693 		/* If the controller supports the LE Set Default PHY or
694 		 * LE Set PHY commands, enable the corresponding event.
695 		 */
696 		if (hdev->commands[35] & (0x20 | 0x40))
697 			events[1] |= 0x08;        /* LE PHY Update Complete */
698 
699 		/* If the controller supports LE Set Extended Scan Parameters
700 		 * and LE Set Extended Scan Enable commands, enable the
701 		 * corresponding event.
702 		 */
703 		if (use_ext_scan(hdev))
704 			events[1] |= 0x10;	/* LE Extended Advertising
705 						 * Report
706 						 */
707 
708 		/* If the controller supports the LE Extended Create Connection
709 		 * command, enable the corresponding event.
710 		 */
711 		if (use_ext_conn(hdev))
712 			events[1] |= 0x02;      /* LE Enhanced Connection
713 						 * Complete
714 						 */
715 
716 		/* If the controller supports the LE Extended Advertising
717 		 * command, enable the corresponding event.
718 		 */
719 		if (ext_adv_capable(hdev))
720 			events[2] |= 0x02;	/* LE Advertising Set
721 						 * Terminated
722 						 */
723 
724 		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
725 			    events);
726 
727 		/* Read LE Advertising Channel TX Power */
728 		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
729 			/* The HCI spec forbids mixing legacy and extended
730 			 * advertising commands, and READ_ADV_TX_POWER is one
731 			 * of the legacy commands. Do not issue it if extended
732 			 * advertising is supported, otherwise the controller
733 			 * will return COMMAND_DISALLOWED for the extended commands.
734 			 */
735 			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
736 		}
737 
738 		if (hdev->commands[26] & 0x40) {
739 			/* Read LE White List Size */
740 			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
741 				    0, NULL);
742 		}
743 
744 		if (hdev->commands[26] & 0x80) {
745 			/* Clear LE White List */
746 			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
747 		}
748 
749 		if (hdev->commands[34] & 0x40) {
750 			/* Read LE Resolving List Size */
751 			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
752 				    0, NULL);
753 		}
754 
755 		if (hdev->commands[34] & 0x20) {
756 			/* Clear LE Resolving List */
757 			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
758 		}
759 
760 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
761 			/* Read LE Maximum Data Length */
762 			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
763 
764 			/* Read LE Suggested Default Data Length */
765 			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
766 		}
767 
768 		if (ext_adv_capable(hdev)) {
769 			/* Read LE Number of Supported Advertising Sets */
770 			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
771 				    0, NULL);
772 		}
773 
774 		hci_set_le_support(req);
775 	}
776 
777 	/* Read features beyond page 1 if available */
778 	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
779 		struct hci_cp_read_local_ext_features cp;
780 
781 		cp.page = p;
782 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
783 			    sizeof(cp), &cp);
784 	}
785 
786 	return 0;
787 }
788 
789 static int hci_init4_req(struct hci_request *req, unsigned long opt)
790 {
791 	struct hci_dev *hdev = req->hdev;
792 
793 	/* Some Broadcom based Bluetooth controllers do not support the
794 	 * Delete Stored Link Key command. They are clearly indicating its
795 	 * absence in the bit mask of supported commands.
796 	 *
797 	 * Check the supported commands and send the command only if it is
798 	 * marked as supported. If not supported, assume that the controller
799 	 * does not have actual support for stored link keys which makes this
800 	 * command redundant anyway.
801 	 *
802 	 * Some controllers indicate that they support handling deleting
803 	 * stored link keys, but they don't. The quirk lets a driver
804 	 * just disable this command.
805 	 */
806 	if (hdev->commands[6] & 0x80 &&
807 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
808 		struct hci_cp_delete_stored_link_key cp;
809 
810 		bacpy(&cp.bdaddr, BDADDR_ANY);
811 		cp.delete_all = 0x01;
812 		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
813 			    sizeof(cp), &cp);
814 	}
815 
816 	/* Set event mask page 2 if the HCI command for it is supported */
817 	if (hdev->commands[22] & 0x04)
818 		hci_set_event_mask_page_2(req);
819 
820 	/* Read local codec list if the HCI command is supported */
821 	if (hdev->commands[29] & 0x20)
822 		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
823 
824 	/* Get MWS transport configuration if the HCI command is supported */
825 	if (hdev->commands[30] & 0x08)
826 		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
827 
828 	/* Check for Synchronization Train support */
829 	if (lmp_sync_train_capable(hdev))
830 		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
831 
832 	/* Enable Secure Connections if supported and configured */
833 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
834 	    bredr_sc_enabled(hdev)) {
835 		u8 support = 0x01;
836 
837 		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
838 			    sizeof(support), &support);
839 	}
840 
841 	/* Set Suggested Default Data Length to maximum if supported */
842 	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
843 		struct hci_cp_le_write_def_data_len cp;
844 
845 		cp.tx_len = hdev->le_max_tx_len;
846 		cp.tx_time = hdev->le_max_tx_time;
847 		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
848 	}
849 
850 	/* Set Default PHY parameters if command is supported */
851 	if (hdev->commands[35] & 0x20) {
852 		struct hci_cp_le_set_default_phy cp;
853 
854 		cp.all_phys = 0x00;
855 		cp.tx_phys = hdev->le_tx_def_phys;
856 		cp.rx_phys = hdev->le_rx_def_phys;
857 
858 		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
859 	}
860 
861 	return 0;
862 }
863 
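/* Full initialization sequence for a configured controller, run from
 * hci_dev_do_open() during power on: stage 1 (reset and basic reads),
 * stage 2 (transport specific setup), and for primary controllers
 * stages 3 and 4 (event masks, LE setup and optional commands).
 * debugfs entries are only created while in the setup or config phase.
 */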
864 static int __hci_init(struct hci_dev *hdev)
865 {
866 	int err;
867 
868 	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
869 	if (err < 0)
870 		return err;
871 
872 	if (hci_dev_test_flag(hdev, HCI_SETUP))
873 		hci_debugfs_create_basic(hdev);
874 
875 	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
876 	if (err < 0)
877 		return err;
878 
879 	/* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
880 	 * dual-mode BR/EDR/LE controllers. AMP controllers only need the
881 	 * first two stages of init.
882 	 */
883 	if (hdev->dev_type != HCI_PRIMARY)
884 		return 0;
885 
886 	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
887 	if (err < 0)
888 		return err;
889 
890 	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
891 	if (err < 0)
892 		return err;
893 
894 	/* This function is only called when the controller is actually in
895 	 * configured state. When the controller is marked as unconfigured,
896 	 * this initialization procedure is not run.
897 	 *
898 	 * It means that it is possible that a controller runs through its
899 	 * setup phase and then discovers missing settings. If that is the
900 	 * case, then this function will not be called. It then will only
901 	 * be called during the config phase.
902 	 *
903 	 * So only when in setup phase or config phase, create the debugfs
904 	 * entries and register the SMP channels.
905 	 */
906 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
907 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
908 		return 0;
909 
910 	hci_debugfs_create_common(hdev);
911 
912 	if (lmp_bredr_capable(hdev))
913 		hci_debugfs_create_bredr(hdev);
914 
915 	if (lmp_le_capable(hdev))
916 		hci_debugfs_create_le(hdev);
917 
918 	return 0;
919 }
920 
921 static int hci_init0_req(struct hci_request *req, unsigned long opt)
922 {
923 	struct hci_dev *hdev = req->hdev;
924 
925 	BT_DBG("%s %ld", hdev->name, opt);
926 
927 	/* Reset */
928 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
929 		hci_reset_req(req, 0);
930 
931 	/* Read Local Version */
932 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
933 
934 	/* Read BD Address */
935 	if (hdev->set_bdaddr)
936 		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
937 
938 	return 0;
939 }
940 
941 static int __hci_unconf_init(struct hci_dev *hdev)
942 {
943 	int err;
944 
945 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
946 		return 0;
947 
948 	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
949 	if (err < 0)
950 		return err;
951 
952 	if (hci_dev_test_flag(hdev, HCI_SETUP))
953 		hci_debugfs_create_basic(hdev);
954 
955 	return 0;
956 }
957 
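/* Small request builders that each issue a single HCI command (scan
 * enable, authentication, encryption, default link policy); they are
 * run synchronously via hci_req_sync() from the ioctl handlers.
 */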
958 static int hci_scan_req(struct hci_request *req, unsigned long opt)
959 {
960 	__u8 scan = opt;
961 
962 	BT_DBG("%s %x", req->hdev->name, scan);
963 
964 	/* Inquiry and Page scans */
965 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
966 	return 0;
967 }
968 
969 static int hci_auth_req(struct hci_request *req, unsigned long opt)
970 {
971 	__u8 auth = opt;
972 
973 	BT_DBG("%s %x", req->hdev->name, auth);
974 
975 	/* Authentication */
976 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
977 	return 0;
978 }
979 
980 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
981 {
982 	__u8 encrypt = opt;
983 
984 	BT_DBG("%s %x", req->hdev->name, encrypt);
985 
986 	/* Encryption */
987 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
988 	return 0;
989 }
990 
991 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
992 {
993 	__le16 policy = cpu_to_le16(opt);
994 
995 	BT_DBG("%s %x", req->hdev->name, policy);
996 
997 	/* Default link policy */
998 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
999 	return 0;
1000 }
1001 
1002 /* Get HCI device by index.
1003  * Device is held on return. */
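/* Note: a reference obtained here must be released with hci_dev_put()
 * once the caller is done with it, e.g. (hypothetical caller):
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */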
1004 struct hci_dev *hci_dev_get(int index)
1005 {
1006 	struct hci_dev *hdev = NULL, *d;
1007 
1008 	BT_DBG("%d", index);
1009 
1010 	if (index < 0)
1011 		return NULL;
1012 
1013 	read_lock(&hci_dev_list_lock);
1014 	list_for_each_entry(d, &hci_dev_list, list) {
1015 		if (d->id == index) {
1016 			hdev = hci_dev_hold(d);
1017 			break;
1018 		}
1019 	}
1020 	read_unlock(&hci_dev_list_lock);
1021 	return hdev;
1022 }
1023 
1024 /* ---- Inquiry support ---- */
1025 
1026 bool hci_discovery_active(struct hci_dev *hdev)
1027 {
1028 	struct discovery_state *discov = &hdev->discovery;
1029 
1030 	switch (discov->state) {
1031 	case DISCOVERY_FINDING:
1032 	case DISCOVERY_RESOLVING:
1033 		return true;
1034 
1035 	default:
1036 		return false;
1037 	}
1038 }
1039 
1040 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1041 {
1042 	int old_state = hdev->discovery.state;
1043 
1044 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1045 
1046 	if (old_state == state)
1047 		return;
1048 
1049 	hdev->discovery.state = state;
1050 
1051 	switch (state) {
1052 	case DISCOVERY_STOPPED:
1053 		hci_update_background_scan(hdev);
1054 
1055 		if (old_state != DISCOVERY_STARTING)
1056 			mgmt_discovering(hdev, 0);
1057 		break;
1058 	case DISCOVERY_STARTING:
1059 		break;
1060 	case DISCOVERY_FINDING:
1061 		mgmt_discovering(hdev, 1);
1062 		break;
1063 	case DISCOVERY_RESOLVING:
1064 		break;
1065 	case DISCOVERY_STOPPING:
1066 		break;
1067 	}
1068 }
1069 
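/* The inquiry cache keeps one inquiry_entry per discovered device on
 * three lists: 'all' for every entry, 'unknown' for entries whose
 * remote name is not known yet, and 'resolve' for entries queued for
 * name resolution. The helpers below manage these lists and are called
 * with hdev->lock held.
 */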
1070 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1071 {
1072 	struct discovery_state *cache = &hdev->discovery;
1073 	struct inquiry_entry *p, *n;
1074 
1075 	list_for_each_entry_safe(p, n, &cache->all, all) {
1076 		list_del(&p->all);
1077 		kfree(p);
1078 	}
1079 
1080 	INIT_LIST_HEAD(&cache->unknown);
1081 	INIT_LIST_HEAD(&cache->resolve);
1082 }
1083 
1084 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1085 					       bdaddr_t *bdaddr)
1086 {
1087 	struct discovery_state *cache = &hdev->discovery;
1088 	struct inquiry_entry *e;
1089 
1090 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1091 
1092 	list_for_each_entry(e, &cache->all, all) {
1093 		if (!bacmp(&e->data.bdaddr, bdaddr))
1094 			return e;
1095 	}
1096 
1097 	return NULL;
1098 }
1099 
1100 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1101 						       bdaddr_t *bdaddr)
1102 {
1103 	struct discovery_state *cache = &hdev->discovery;
1104 	struct inquiry_entry *e;
1105 
1106 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1107 
1108 	list_for_each_entry(e, &cache->unknown, list) {
1109 		if (!bacmp(&e->data.bdaddr, bdaddr))
1110 			return e;
1111 	}
1112 
1113 	return NULL;
1114 }
1115 
1116 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1117 						       bdaddr_t *bdaddr,
1118 						       int state)
1119 {
1120 	struct discovery_state *cache = &hdev->discovery;
1121 	struct inquiry_entry *e;
1122 
1123 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1124 
1125 	list_for_each_entry(e, &cache->resolve, list) {
1126 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1127 			return e;
1128 		if (!bacmp(&e->data.bdaddr, bdaddr))
1129 			return e;
1130 	}
1131 
1132 	return NULL;
1133 }
1134 
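/* Re-insert @ie into the resolve list so that entries are kept ordered
 * by signal strength (smallest absolute RSSI, i.e. strongest signal,
 * first); entries whose name resolution is already pending stay ahead.
 */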
1135 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1136 				      struct inquiry_entry *ie)
1137 {
1138 	struct discovery_state *cache = &hdev->discovery;
1139 	struct list_head *pos = &cache->resolve;
1140 	struct inquiry_entry *p;
1141 
1142 	list_del(&ie->list);
1143 
1144 	list_for_each_entry(p, &cache->resolve, list) {
1145 		if (p->name_state != NAME_PENDING &&
1146 		    abs(p->data.rssi) >= abs(ie->data.rssi))
1147 			break;
1148 		pos = &p->list;
1149 	}
1150 
1151 	list_add(&ie->list, pos);
1152 }
1153 
1154 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1155 			     bool name_known)
1156 {
1157 	struct discovery_state *cache = &hdev->discovery;
1158 	struct inquiry_entry *ie;
1159 	u32 flags = 0;
1160 
1161 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1162 
1163 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1164 
1165 	if (!data->ssp_mode)
1166 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1167 
1168 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1169 	if (ie) {
1170 		if (!ie->data.ssp_mode)
1171 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1172 
1173 		if (ie->name_state == NAME_NEEDED &&
1174 		    data->rssi != ie->data.rssi) {
1175 			ie->data.rssi = data->rssi;
1176 			hci_inquiry_cache_update_resolve(hdev, ie);
1177 		}
1178 
1179 		goto update;
1180 	}
1181 
1182 	/* Entry not in the cache. Add new one. */
1183 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1184 	if (!ie) {
1185 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1186 		goto done;
1187 	}
1188 
1189 	list_add(&ie->all, &cache->all);
1190 
1191 	if (name_known) {
1192 		ie->name_state = NAME_KNOWN;
1193 	} else {
1194 		ie->name_state = NAME_NOT_KNOWN;
1195 		list_add(&ie->list, &cache->unknown);
1196 	}
1197 
1198 update:
1199 	if (name_known && ie->name_state != NAME_KNOWN &&
1200 	    ie->name_state != NAME_PENDING) {
1201 		ie->name_state = NAME_KNOWN;
1202 		list_del(&ie->list);
1203 	}
1204 
1205 	memcpy(&ie->data, data, sizeof(*data));
1206 	ie->timestamp = jiffies;
1207 	cache->timestamp = jiffies;
1208 
1209 	if (ie->name_state == NAME_NOT_KNOWN)
1210 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1211 
1212 done:
1213 	return flags;
1214 }
1215 
1216 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1217 {
1218 	struct discovery_state *cache = &hdev->discovery;
1219 	struct inquiry_info *info = (struct inquiry_info *) buf;
1220 	struct inquiry_entry *e;
1221 	int copied = 0;
1222 
1223 	list_for_each_entry(e, &cache->all, all) {
1224 		struct inquiry_data *data = &e->data;
1225 
1226 		if (copied >= num)
1227 			break;
1228 
1229 		bacpy(&info->bdaddr, &data->bdaddr);
1230 		info->pscan_rep_mode	= data->pscan_rep_mode;
1231 		info->pscan_period_mode	= data->pscan_period_mode;
1232 		info->pscan_mode	= data->pscan_mode;
1233 		memcpy(info->dev_class, data->dev_class, 3);
1234 		info->clock_offset	= data->clock_offset;
1235 
1236 		info++;
1237 		copied++;
1238 	}
1239 
1240 	BT_DBG("cache %p, copied %d", cache, copied);
1241 	return copied;
1242 }
1243 
1244 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1245 {
1246 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1247 	struct hci_dev *hdev = req->hdev;
1248 	struct hci_cp_inquiry cp;
1249 
1250 	BT_DBG("%s", hdev->name);
1251 
1252 	if (test_bit(HCI_INQUIRY, &hdev->flags))
1253 		return 0;
1254 
1255 	/* Start Inquiry */
1256 	memcpy(&cp.lap, &ir->lap, 3);
1257 	cp.length  = ir->length;
1258 	cp.num_rsp = ir->num_rsp;
1259 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1260 
1261 	return 0;
1262 }
1263 
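/* Handler for the HCIINQUIRY ioctl: start an inquiry if the cache is
 * stale, empty or a flush was requested, wait for it to finish and
 * then copy the cached inquiry_info records back to user space.
 */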
1264 int hci_inquiry(void __user *arg)
1265 {
1266 	__u8 __user *ptr = arg;
1267 	struct hci_inquiry_req ir;
1268 	struct hci_dev *hdev;
1269 	int err = 0, do_inquiry = 0, max_rsp;
1270 	long timeo;
1271 	__u8 *buf;
1272 
1273 	if (copy_from_user(&ir, ptr, sizeof(ir)))
1274 		return -EFAULT;
1275 
1276 	hdev = hci_dev_get(ir.dev_id);
1277 	if (!hdev)
1278 		return -ENODEV;
1279 
1280 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1281 		err = -EBUSY;
1282 		goto done;
1283 	}
1284 
1285 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1286 		err = -EOPNOTSUPP;
1287 		goto done;
1288 	}
1289 
1290 	if (hdev->dev_type != HCI_PRIMARY) {
1291 		err = -EOPNOTSUPP;
1292 		goto done;
1293 	}
1294 
1295 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1296 		err = -EOPNOTSUPP;
1297 		goto done;
1298 	}
1299 
1300 	hci_dev_lock(hdev);
1301 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1302 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1303 		hci_inquiry_cache_flush(hdev);
1304 		do_inquiry = 1;
1305 	}
1306 	hci_dev_unlock(hdev);
1307 
1308 	timeo = ir.length * msecs_to_jiffies(2000);
1309 
1310 	if (do_inquiry) {
1311 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1312 				   timeo, NULL);
1313 		if (err < 0)
1314 			goto done;
1315 
1316 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1317 		 * cleared). If it is interrupted by a signal, return -EINTR.
1318 		 */
1319 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1320 				TASK_INTERRUPTIBLE))
1321 			return -EINTR;
1322 	}
1323 
1324 	/* For an unlimited number of responses we will use a buffer with
1325 	 * 255 entries
1326 	 */
1327 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1328 
1329 	/* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
1330 	 * first and then copy it to user space.
1331 	 */
1332 	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1333 	if (!buf) {
1334 		err = -ENOMEM;
1335 		goto done;
1336 	}
1337 
1338 	hci_dev_lock(hdev);
1339 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1340 	hci_dev_unlock(hdev);
1341 
1342 	BT_DBG("num_rsp %d", ir.num_rsp);
1343 
1344 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1345 		ptr += sizeof(ir);
1346 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1347 				 ir.num_rsp))
1348 			err = -EFAULT;
1349 	} else
1350 		err = -EFAULT;
1351 
1352 	kfree(buf);
1353 
1354 done:
1355 	hci_dev_put(hdev);
1356 	return err;
1357 }
1358 
1359 /**
1360  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1361  *				       (BD_ADDR) for an HCI device from
1362  *				       a firmware node property.
1363  * @hdev:	The HCI device
1364  *
1365  * Search the firmware node for 'local-bd-address'.
1366  *
1367  * All-zero BD addresses are rejected, because those could be properties
1368  * that exist in the firmware tables, but were not updated by the firmware. For
1369  * example, the DTS could define 'local-bd-address', with zero BD addresses.
1370  */
1371 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1372 {
1373 	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1374 	bdaddr_t ba;
1375 	int ret;
1376 
1377 	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1378 					    (u8 *)&ba, sizeof(ba));
1379 	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1380 		return;
1381 
1382 	bacpy(&hdev->public_addr, &ba);
1383 }
1384 
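/* Core power-on path: takes the request lock, opens the transport,
 * runs the driver setup() callback on first power on, optionally picks
 * up the BD address from a firmware property or the set_bdaddr()
 * callback, runs the HCI init sequence and finally notifies the
 * management interface. On failure all work and queues are cleaned up
 * and the transport is closed again.
 */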
1385 static int hci_dev_do_open(struct hci_dev *hdev)
1386 {
1387 	int ret = 0;
1388 
1389 	BT_DBG("%s %p", hdev->name, hdev);
1390 
1391 	hci_req_sync_lock(hdev);
1392 
1393 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1394 		ret = -ENODEV;
1395 		goto done;
1396 	}
1397 
1398 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1399 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1400 		/* Check for rfkill but allow the HCI setup stage to
1401 		 * proceed (which in itself doesn't cause any RF activity).
1402 		 */
1403 		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1404 			ret = -ERFKILL;
1405 			goto done;
1406 		}
1407 
1408 		/* Check for valid public address or a configured static
1409 		 * random address, but let the HCI setup proceed to
1410 		 * be able to determine if there is a public address
1411 		 * or not.
1412 		 *
1413 		 * In case of user channel usage, it is not important
1414 		 * if a public address or static random address is
1415 		 * available.
1416 		 *
1417 		 * This check is only valid for BR/EDR controllers
1418 		 * since AMP controllers do not have an address.
1419 		 */
1420 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1421 		    hdev->dev_type == HCI_PRIMARY &&
1422 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1423 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1424 			ret = -EADDRNOTAVAIL;
1425 			goto done;
1426 		}
1427 	}
1428 
1429 	if (test_bit(HCI_UP, &hdev->flags)) {
1430 		ret = -EALREADY;
1431 		goto done;
1432 	}
1433 
1434 	if (hdev->open(hdev)) {
1435 		ret = -EIO;
1436 		goto done;
1437 	}
1438 
1439 	set_bit(HCI_RUNNING, &hdev->flags);
1440 	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1441 
1442 	atomic_set(&hdev->cmd_cnt, 1);
1443 	set_bit(HCI_INIT, &hdev->flags);
1444 
1445 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1446 	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1447 		hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1448 
1449 		if (hdev->setup)
1450 			ret = hdev->setup(hdev);
1451 
1452 		if (ret)
1453 			goto setup_failed;
1454 
1455 		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1456 			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1457 				hci_dev_get_bd_addr_from_property(hdev);
1458 
1459 			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1460 			    hdev->set_bdaddr)
1461 				ret = hdev->set_bdaddr(hdev,
1462 						       &hdev->public_addr);
1463 		}
1464 
1465 setup_failed:
1466 		/* The transport driver can set these quirks before
1467 		 * creating the HCI device or in its setup callback.
1468 		 *
1469 		 * In case any of them is set, the controller has to
1470 		 * start up as unconfigured.
1471 		 */
1472 		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1473 		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1474 			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1475 
1476 		/* For an unconfigured controller it is required to
1477 		 * read at least the version information provided by
1478 		 * the Read Local Version Information command.
1479 		 *
1480 		 * If the set_bdaddr driver callback is provided, then
1481 		 * also the original Bluetooth public device address
1482 		 * will be read using the Read BD Address command.
1483 		 */
1484 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1485 			ret = __hci_unconf_init(hdev);
1486 	}
1487 
1488 	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1489 		/* If public address change is configured, ensure that
1490 		 * the address gets programmed. If the driver does not
1491 		 * support changing the public address, fail the power
1492 		 * on procedure.
1493 		 */
1494 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1495 		    hdev->set_bdaddr)
1496 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1497 		else
1498 			ret = -EADDRNOTAVAIL;
1499 	}
1500 
1501 	if (!ret) {
1502 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1503 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1504 			ret = __hci_init(hdev);
1505 			if (!ret && hdev->post_init)
1506 				ret = hdev->post_init(hdev);
1507 		}
1508 	}
1509 
1510 	/* If the HCI Reset command is clearing all diagnostic settings,
1511 	 * then they need to be reprogrammed after the init procedure
1512 	 * completed.
1513 	 * has completed.
1514 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1515 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1516 	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1517 		ret = hdev->set_diag(hdev, true);
1518 
1519 	clear_bit(HCI_INIT, &hdev->flags);
1520 
1521 	if (!ret) {
1522 		hci_dev_hold(hdev);
1523 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1524 		hci_adv_instances_set_rpa_expired(hdev, true);
1525 		set_bit(HCI_UP, &hdev->flags);
1526 		hci_sock_dev_event(hdev, HCI_DEV_UP);
1527 		hci_leds_update_powered(hdev, true);
1528 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1529 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1530 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1531 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1532 		    hci_dev_test_flag(hdev, HCI_MGMT) &&
1533 		    hdev->dev_type == HCI_PRIMARY) {
1534 			ret = __hci_req_hci_power_on(hdev);
1535 			mgmt_power_on(hdev, ret);
1536 		}
1537 	} else {
1538 		/* Init failed, cleanup */
1539 		flush_work(&hdev->tx_work);
1540 		flush_work(&hdev->cmd_work);
1541 		flush_work(&hdev->rx_work);
1542 
1543 		skb_queue_purge(&hdev->cmd_q);
1544 		skb_queue_purge(&hdev->rx_q);
1545 
1546 		if (hdev->flush)
1547 			hdev->flush(hdev);
1548 
1549 		if (hdev->sent_cmd) {
1550 			kfree_skb(hdev->sent_cmd);
1551 			hdev->sent_cmd = NULL;
1552 		}
1553 
1554 		clear_bit(HCI_RUNNING, &hdev->flags);
1555 		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1556 
1557 		hdev->close(hdev);
1558 		hdev->flags &= BIT(HCI_RAW);
1559 	}
1560 
1561 done:
1562 	hci_req_sync_unlock(hdev);
1563 	return ret;
1564 }
1565 
1566 /* ---- HCI ioctl helpers ---- */
1567 
1568 int hci_dev_open(__u16 dev)
1569 {
1570 	struct hci_dev *hdev;
1571 	int err;
1572 
1573 	hdev = hci_dev_get(dev);
1574 	if (!hdev)
1575 		return -ENODEV;
1576 
1577 	/* Devices that are marked as unconfigured can only be powered
1578 	 * up as user channel. Trying to bring them up as normal devices
1579 	 * will result in a failure. Only user channel operation is
1580 	 * possible.
1581 	 *
1582 	 * When this function is called for a user channel, the flag
1583 	 * HCI_USER_CHANNEL will be set first before attempting to
1584 	 * open the device.
1585 	 */
1586 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1587 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1588 		err = -EOPNOTSUPP;
1589 		goto done;
1590 	}
1591 
1592 	/* We need to ensure that no other power on/off work is pending
1593 	 * before proceeding to call hci_dev_do_open. This is
1594 	 * particularly important if the setup procedure has not yet
1595 	 * completed.
1596 	 */
1597 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1598 		cancel_delayed_work(&hdev->power_off);
1599 
1600 	/* After this call it is guaranteed that the setup procedure
1601 	 * has finished. This means that error conditions like RFKILL
1602 	 * or no valid public or static random address apply.
1603 	 */
1604 	flush_workqueue(hdev->req_workqueue);
1605 
1606 	/* For controllers not using the management interface and that
1607 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1608 	 * so that pairing works for them. Once the management interface
1609 	 * is in use this bit will be cleared again and userspace has
1610 	 * to explicitly enable it.
1611 	 */
1612 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1613 	    !hci_dev_test_flag(hdev, HCI_MGMT))
1614 		hci_dev_set_flag(hdev, HCI_BONDABLE);
1615 
1616 	err = hci_dev_do_open(hdev);
1617 
1618 done:
1619 	hci_dev_put(hdev);
1620 	return err;
1621 }
1622 
1623 /* This function requires the caller holds hdev->lock */
1624 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1625 {
1626 	struct hci_conn_params *p;
1627 
1628 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1629 		if (p->conn) {
1630 			hci_conn_drop(p->conn);
1631 			hci_conn_put(p->conn);
1632 			p->conn = NULL;
1633 		}
1634 		list_del_init(&p->action);
1635 	}
1636 
1637 	BT_DBG("All LE pending actions cleared");
1638 }
1639 
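/* Core power-off path: run the vendor shutdown() callback if needed,
 * flush pending work and queues, drop all connections and cached
 * state, optionally send HCI Reset, and close the transport.
 */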
1640 int hci_dev_do_close(struct hci_dev *hdev)
1641 {
1642 	bool auto_off;
1643 
1644 	BT_DBG("%s %p", hdev->name, hdev);
1645 
1646 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1647 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1648 	    test_bit(HCI_UP, &hdev->flags)) {
1649 		/* Execute vendor specific shutdown routine */
1650 		if (hdev->shutdown)
1651 			hdev->shutdown(hdev);
1652 	}
1653 
1654 	cancel_delayed_work(&hdev->power_off);
1655 
1656 	hci_request_cancel_all(hdev);
1657 	hci_req_sync_lock(hdev);
1658 
1659 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1660 		cancel_delayed_work_sync(&hdev->cmd_timer);
1661 		hci_req_sync_unlock(hdev);
1662 		return 0;
1663 	}
1664 
1665 	hci_leds_update_powered(hdev, false);
1666 
1667 	/* Flush RX and TX works */
1668 	flush_work(&hdev->tx_work);
1669 	flush_work(&hdev->rx_work);
1670 
1671 	if (hdev->discov_timeout > 0) {
1672 		hdev->discov_timeout = 0;
1673 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1674 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1675 	}
1676 
1677 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1678 		cancel_delayed_work(&hdev->service_cache);
1679 
1680 	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1681 		struct adv_info *adv_instance;
1682 
1683 		cancel_delayed_work_sync(&hdev->rpa_expired);
1684 
1685 		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1686 			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1687 	}
1688 
1689 	/* Avoid potential lockdep warnings from the *_flush() calls by
1690 	 * ensuring the workqueue is empty up front.
1691 	 */
1692 	drain_workqueue(hdev->workqueue);
1693 
1694 	hci_dev_lock(hdev);
1695 
1696 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1697 
1698 	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1699 
1700 	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1701 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1702 	    hci_dev_test_flag(hdev, HCI_MGMT))
1703 		__mgmt_power_off(hdev);
1704 
1705 	hci_inquiry_cache_flush(hdev);
1706 	hci_pend_le_actions_clear(hdev);
1707 	hci_conn_hash_flush(hdev);
1708 	hci_dev_unlock(hdev);
1709 
1710 	smp_unregister(hdev);
1711 
1712 	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1713 
1714 	if (hdev->flush)
1715 		hdev->flush(hdev);
1716 
1717 	/* Reset device */
1718 	skb_queue_purge(&hdev->cmd_q);
1719 	atomic_set(&hdev->cmd_cnt, 1);
1720 	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1721 	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1722 		set_bit(HCI_INIT, &hdev->flags);
1723 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1724 		clear_bit(HCI_INIT, &hdev->flags);
1725 	}
1726 
1727 	/* Flush cmd work */
1728 	flush_work(&hdev->cmd_work);
1729 
1730 	/* Drop queues */
1731 	skb_queue_purge(&hdev->rx_q);
1732 	skb_queue_purge(&hdev->cmd_q);
1733 	skb_queue_purge(&hdev->raw_q);
1734 
1735 	/* Drop last sent command */
1736 	if (hdev->sent_cmd) {
1737 		cancel_delayed_work_sync(&hdev->cmd_timer);
1738 		kfree_skb(hdev->sent_cmd);
1739 		hdev->sent_cmd = NULL;
1740 	}
1741 
1742 	clear_bit(HCI_RUNNING, &hdev->flags);
1743 	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1744 
1745 	/* After this point our queues are empty
1746 	 * and no tasks are scheduled. */
1747 	hdev->close(hdev);
1748 
1749 	/* Clear flags */
1750 	hdev->flags &= BIT(HCI_RAW);
1751 	hci_dev_clear_volatile_flags(hdev);
1752 
1753 	/* Controller radio is available but is currently powered down */
1754 	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1755 
1756 	memset(hdev->eir, 0, sizeof(hdev->eir));
1757 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1758 	bacpy(&hdev->random_addr, BDADDR_ANY);
1759 
1760 	hci_req_sync_unlock(hdev);
1761 
1762 	hci_dev_put(hdev);
1763 	return 0;
1764 }
1765 
1766 int hci_dev_close(__u16 dev)
1767 {
1768 	struct hci_dev *hdev;
1769 	int err;
1770 
1771 	hdev = hci_dev_get(dev);
1772 	if (!hdev)
1773 		return -ENODEV;
1774 
1775 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1776 		err = -EBUSY;
1777 		goto done;
1778 	}
1779 
1780 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1781 		cancel_delayed_work(&hdev->power_off);
1782 
1783 	err = hci_dev_do_close(hdev);
1784 
1785 done:
1786 	hci_dev_put(hdev);
1787 	return err;
1788 }
1789 
1790 static int hci_dev_do_reset(struct hci_dev *hdev)
1791 {
1792 	int ret;
1793 
1794 	BT_DBG("%s %p", hdev->name, hdev);
1795 
1796 	hci_req_sync_lock(hdev);
1797 
1798 	/* Drop queues */
1799 	skb_queue_purge(&hdev->rx_q);
1800 	skb_queue_purge(&hdev->cmd_q);
1801 
1802 	/* Avoid potential lockdep warnings from the *_flush() calls by
1803 	 * ensuring the workqueue is empty up front.
1804 	 */
1805 	drain_workqueue(hdev->workqueue);
1806 
1807 	hci_dev_lock(hdev);
1808 	hci_inquiry_cache_flush(hdev);
1809 	hci_conn_hash_flush(hdev);
1810 	hci_dev_unlock(hdev);
1811 
1812 	if (hdev->flush)
1813 		hdev->flush(hdev);
1814 
1815 	atomic_set(&hdev->cmd_cnt, 1);
1816 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1817 
1818 	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1819 
1820 	hci_req_sync_unlock(hdev);
1821 	return ret;
1822 }
1823 
1824 int hci_dev_reset(__u16 dev)
1825 {
1826 	struct hci_dev *hdev;
1827 	int err;
1828 
1829 	hdev = hci_dev_get(dev);
1830 	if (!hdev)
1831 		return -ENODEV;
1832 
1833 	if (!test_bit(HCI_UP, &hdev->flags)) {
1834 		err = -ENETDOWN;
1835 		goto done;
1836 	}
1837 
1838 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1839 		err = -EBUSY;
1840 		goto done;
1841 	}
1842 
1843 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1844 		err = -EOPNOTSUPP;
1845 		goto done;
1846 	}
1847 
1848 	err = hci_dev_do_reset(hdev);
1849 
1850 done:
1851 	hci_dev_put(hdev);
1852 	return err;
1853 }
1854 
1855 int hci_dev_reset_stat(__u16 dev)
1856 {
1857 	struct hci_dev *hdev;
1858 	int ret = 0;
1859 
1860 	hdev = hci_dev_get(dev);
1861 	if (!hdev)
1862 		return -ENODEV;
1863 
1864 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1865 		ret = -EBUSY;
1866 		goto done;
1867 	}
1868 
1869 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1870 		ret = -EOPNOTSUPP;
1871 		goto done;
1872 	}
1873 
1874 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1875 
1876 done:
1877 	hci_dev_put(hdev);
1878 	return ret;
1879 }
1880 
1881 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1882 {
1883 	bool conn_changed, discov_changed;
1884 
1885 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
1886 
1887 	if ((scan & SCAN_PAGE))
1888 		conn_changed = !hci_dev_test_and_set_flag(hdev,
1889 							  HCI_CONNECTABLE);
1890 	else
1891 		conn_changed = hci_dev_test_and_clear_flag(hdev,
1892 							   HCI_CONNECTABLE);
1893 
1894 	if ((scan & SCAN_INQUIRY)) {
1895 		discov_changed = !hci_dev_test_and_set_flag(hdev,
1896 							    HCI_DISCOVERABLE);
1897 	} else {
1898 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1899 		discov_changed = hci_dev_test_and_clear_flag(hdev,
1900 							     HCI_DISCOVERABLE);
1901 	}
1902 
1903 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
1904 		return;
1905 
1906 	if (conn_changed || discov_changed) {
1907 		/* In case this was disabled through mgmt */
1908 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1909 
1910 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1911 			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1912 
1913 		mgmt_new_settings(hdev);
1914 	}
1915 }
1916 
1917 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1918 {
1919 	struct hci_dev *hdev;
1920 	struct hci_dev_req dr;
1921 	int err = 0;
1922 
1923 	if (copy_from_user(&dr, arg, sizeof(dr)))
1924 		return -EFAULT;
1925 
1926 	hdev = hci_dev_get(dr.dev_id);
1927 	if (!hdev)
1928 		return -ENODEV;
1929 
1930 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1931 		err = -EBUSY;
1932 		goto done;
1933 	}
1934 
1935 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1936 		err = -EOPNOTSUPP;
1937 		goto done;
1938 	}
1939 
1940 	if (hdev->dev_type != HCI_PRIMARY) {
1941 		err = -EOPNOTSUPP;
1942 		goto done;
1943 	}
1944 
1945 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1946 		err = -EOPNOTSUPP;
1947 		goto done;
1948 	}
1949 
1950 	switch (cmd) {
1951 	case HCISETAUTH:
1952 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1953 				   HCI_INIT_TIMEOUT, NULL);
1954 		break;
1955 
1956 	case HCISETENCRYPT:
1957 		if (!lmp_encrypt_capable(hdev)) {
1958 			err = -EOPNOTSUPP;
1959 			break;
1960 		}
1961 
1962 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
1963 			/* Auth must be enabled first */
1964 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1965 					   HCI_INIT_TIMEOUT, NULL);
1966 			if (err)
1967 				break;
1968 		}
1969 
1970 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1971 				   HCI_INIT_TIMEOUT, NULL);
1972 		break;
1973 
1974 	case HCISETSCAN:
1975 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1976 				   HCI_INIT_TIMEOUT, NULL);
1977 
1978 		/* Ensure that the connectable and discoverable states
1979 		 * get correctly modified as this was a non-mgmt change.
1980 		 */
1981 		if (!err)
1982 			hci_update_scan_state(hdev, dr.dev_opt);
1983 		break;
1984 
1985 	case HCISETLINKPOL:
1986 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1987 				   HCI_INIT_TIMEOUT, NULL);
1988 		break;
1989 
1990 	case HCISETLINKMODE:
1991 		hdev->link_mode = ((__u16) dr.dev_opt) &
1992 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
1993 		break;
1994 
1995 	case HCISETPTYPE:
1996 		if (hdev->pkt_type == (__u16) dr.dev_opt)
1997 			break;
1998 
1999 		hdev->pkt_type = (__u16) dr.dev_opt;
2000 		mgmt_phy_configuration_changed(hdev, NULL);
2001 		break;
2002 
2003 	case HCISETACLMTU:
2004 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2005 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2006 		break;
2007 
2008 	case HCISETSCOMTU:
2009 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2010 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2011 		break;
2012 
2013 	default:
2014 		err = -EINVAL;
2015 		break;
2016 	}
2017 
2018 done:
2019 	hci_dev_put(hdev);
2020 	return err;
2021 }
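
/* Illustrative sketch (hypothetical user-space caller, not part of this
 * file): these legacy controls are reached through ioctl() on a raw HCI
 * socket.  Enabling page and inquiry scan on hci0, much like hciconfig
 * does, might look roughly like this; error handling is omitted:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,				// hci0
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,	// connectable + discoverable
 *	};
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(sk, HCISETSCAN, (unsigned long)&dr);
 *
 * The HCISETSCAN case above then runs hci_scan_req() and, on success,
 * syncs the HCI_CONNECTABLE/HCI_DISCOVERABLE flags through
 * hci_update_scan_state().
 */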
2022 
2023 int hci_get_dev_list(void __user *arg)
2024 {
2025 	struct hci_dev *hdev;
2026 	struct hci_dev_list_req *dl;
2027 	struct hci_dev_req *dr;
2028 	int n = 0, size, err;
2029 	__u16 dev_num;
2030 
2031 	if (get_user(dev_num, (__u16 __user *) arg))
2032 		return -EFAULT;
2033 
2034 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2035 		return -EINVAL;
2036 
2037 	size = sizeof(*dl) + dev_num * sizeof(*dr);
2038 
2039 	dl = kzalloc(size, GFP_KERNEL);
2040 	if (!dl)
2041 		return -ENOMEM;
2042 
2043 	dr = dl->dev_req;
2044 
2045 	read_lock(&hci_dev_list_lock);
2046 	list_for_each_entry(hdev, &hci_dev_list, list) {
2047 		unsigned long flags = hdev->flags;
2048 
2049 		/* When the auto-off is configured it means the transport
2050 		 * is running, but in that case still indicate that the
2051 		 * device is actually down.
2052 		 */
2053 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2054 			flags &= ~BIT(HCI_UP);
2055 
2056 		(dr + n)->dev_id  = hdev->id;
2057 		(dr + n)->dev_opt = flags;
2058 
2059 		if (++n >= dev_num)
2060 			break;
2061 	}
2062 	read_unlock(&hci_dev_list_lock);
2063 
2064 	dl->dev_num = n;
2065 	size = sizeof(*dl) + n * sizeof(*dr);
2066 
2067 	err = copy_to_user(arg, dl, size);
2068 	kfree(dl);
2069 
2070 	return err ? -EFAULT : 0;
2071 }
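
/* Illustrative sketch (hypothetical user-space caller): HCIGETDEVLIST
 * expects a buffer that starts with the requested entry count and has
 * room for that many struct hci_dev_req entries, roughly what
 * "hcitool dev" does:
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *
 *	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	ioctl(sk, HCIGETDEVLIST, (void *)dl);
 *
 * On return dl->dev_num holds the number of filled entries and each
 * dev_opt carries the device flags, with HCI_UP masked out while the
 * auto-off grace period is still pending (see above).
 */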
2072 
2073 int hci_get_dev_info(void __user *arg)
2074 {
2075 	struct hci_dev *hdev;
2076 	struct hci_dev_info di;
2077 	unsigned long flags;
2078 	int err = 0;
2079 
2080 	if (copy_from_user(&di, arg, sizeof(di)))
2081 		return -EFAULT;
2082 
2083 	hdev = hci_dev_get(di.dev_id);
2084 	if (!hdev)
2085 		return -ENODEV;
2086 
2087 	/* When the auto-off is configured it means the transport
2088 	 * is running, but in that case still indicate that the
2089 	 * device is actually down.
2090 	 */
2091 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2092 		flags = hdev->flags & ~BIT(HCI_UP);
2093 	else
2094 		flags = hdev->flags;
2095 
2096 	strcpy(di.name, hdev->name);
2097 	di.bdaddr   = hdev->bdaddr;
2098 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2099 	di.flags    = flags;
2100 	di.pkt_type = hdev->pkt_type;
2101 	if (lmp_bredr_capable(hdev)) {
2102 		di.acl_mtu  = hdev->acl_mtu;
2103 		di.acl_pkts = hdev->acl_pkts;
2104 		di.sco_mtu  = hdev->sco_mtu;
2105 		di.sco_pkts = hdev->sco_pkts;
2106 	} else {
2107 		di.acl_mtu  = hdev->le_mtu;
2108 		di.acl_pkts = hdev->le_pkts;
2109 		di.sco_mtu  = 0;
2110 		di.sco_pkts = 0;
2111 	}
2112 	di.link_policy = hdev->link_policy;
2113 	di.link_mode   = hdev->link_mode;
2114 
2115 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2116 	memcpy(&di.features, &hdev->features, sizeof(di.features));
2117 
2118 	if (copy_to_user(arg, &di, sizeof(di)))
2119 		err = -EFAULT;
2120 
2121 	hci_dev_put(hdev);
2122 
2123 	return err;
2124 }
2125 
2126 /* ---- Interface to HCI drivers ---- */
2127 
2128 static int hci_rfkill_set_block(void *data, bool blocked)
2129 {
2130 	struct hci_dev *hdev = data;
2131 
2132 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2133 
2134 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2135 		return -EBUSY;
2136 
2137 	if (blocked) {
2138 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2139 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2140 		    !hci_dev_test_flag(hdev, HCI_CONFIG))
2141 			hci_dev_do_close(hdev);
2142 	} else {
2143 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
2144 	}
2145 
2146 	return 0;
2147 }
2148 
2149 static const struct rfkill_ops hci_rfkill_ops = {
2150 	.set_block = hci_rfkill_set_block,
2151 };
2152 
2153 static void hci_power_on(struct work_struct *work)
2154 {
2155 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2156 	int err;
2157 
2158 	BT_DBG("%s", hdev->name);
2159 
2160 	if (test_bit(HCI_UP, &hdev->flags) &&
2161 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
2162 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2163 		cancel_delayed_work(&hdev->power_off);
2164 		hci_req_sync_lock(hdev);
2165 		err = __hci_req_hci_power_on(hdev);
2166 		hci_req_sync_unlock(hdev);
2167 		mgmt_power_on(hdev, err);
2168 		return;
2169 	}
2170 
2171 	err = hci_dev_do_open(hdev);
2172 	if (err < 0) {
2173 		hci_dev_lock(hdev);
2174 		mgmt_set_powered_failed(hdev, err);
2175 		hci_dev_unlock(hdev);
2176 		return;
2177 	}
2178 
2179 	/* During the HCI setup phase, a few error conditions are
2180 	 * ignored and they need to be checked now. If they are still
2181 	 * valid, it is important to turn the device back off.
2182 	 */
2183 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2184 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2185 	    (hdev->dev_type == HCI_PRIMARY &&
2186 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2187 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2188 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2189 		hci_dev_do_close(hdev);
2190 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2191 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2192 				   HCI_AUTO_OFF_TIMEOUT);
2193 	}
2194 
2195 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2196 		/* For unconfigured devices, set the HCI_RAW flag
2197 		 * so that userspace can easily identify them.
2198 		 */
2199 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2200 			set_bit(HCI_RAW, &hdev->flags);
2201 
2202 		/* For fully configured devices, this will send
2203 		 * the Index Added event. For unconfigured devices,
2204 		 * it will send Unconfigured Index Added event.
2205 		 *
2206 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2207 		 * and no event will be sent.
2208 		 */
2209 		mgmt_index_added(hdev);
2210 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2211 		/* When the controller is now configured, then it
2212 		 * is important to clear the HCI_RAW flag.
2213 		 */
2214 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2215 			clear_bit(HCI_RAW, &hdev->flags);
2216 
2217 		/* Powering on the controller with HCI_CONFIG set only
2218 		 * happens with the transition from unconfigured to
2219 		 * configured. This will send the Index Added event.
2220 		 */
2221 		mgmt_index_added(hdev);
2222 	}
2223 }
2224 
2225 static void hci_power_off(struct work_struct *work)
2226 {
2227 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2228 					    power_off.work);
2229 
2230 	BT_DBG("%s", hdev->name);
2231 
2232 	hci_dev_do_close(hdev);
2233 }
2234 
2235 static void hci_error_reset(struct work_struct *work)
2236 {
2237 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2238 
2239 	BT_DBG("%s", hdev->name);
2240 
2241 	if (hdev->hw_error)
2242 		hdev->hw_error(hdev, hdev->hw_error_code);
2243 	else
2244 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2245 
2246 	if (hci_dev_do_close(hdev))
2247 		return;
2248 
2249 	hci_dev_do_open(hdev);
2250 }
2251 
2252 void hci_uuids_clear(struct hci_dev *hdev)
2253 {
2254 	struct bt_uuid *uuid, *tmp;
2255 
2256 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2257 		list_del(&uuid->list);
2258 		kfree(uuid);
2259 	}
2260 }
2261 
2262 void hci_link_keys_clear(struct hci_dev *hdev)
2263 {
2264 	struct link_key *key;
2265 
2266 	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2267 		list_del_rcu(&key->list);
2268 		kfree_rcu(key, rcu);
2269 	}
2270 }
2271 
2272 void hci_smp_ltks_clear(struct hci_dev *hdev)
2273 {
2274 	struct smp_ltk *k;
2275 
2276 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2277 		list_del_rcu(&k->list);
2278 		kfree_rcu(k, rcu);
2279 	}
2280 }
2281 
2282 void hci_smp_irks_clear(struct hci_dev *hdev)
2283 {
2284 	struct smp_irk *k;
2285 
2286 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2287 		list_del_rcu(&k->list);
2288 		kfree_rcu(k, rcu);
2289 	}
2290 }
2291 
2292 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2293 {
2294 	struct link_key *k;
2295 
2296 	rcu_read_lock();
2297 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2298 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
2299 			rcu_read_unlock();
2300 			return k;
2301 		}
2302 	}
2303 	rcu_read_unlock();
2304 
2305 	return NULL;
2306 }
2307 
2308 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2309 			       u8 key_type, u8 old_key_type)
2310 {
2311 	/* Legacy key */
2312 	if (key_type < 0x03)
2313 		return true;
2314 
2315 	/* Debug keys are insecure so don't store them persistently */
2316 	if (key_type == HCI_LK_DEBUG_COMBINATION)
2317 		return false;
2318 
2319 	/* Changed combination key and there's no previous one */
2320 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2321 		return false;
2322 
2323 	/* Security mode 3 case */
2324 	if (!conn)
2325 		return true;
2326 
2327 	/* BR/EDR key derived using SC from an LE link */
2328 	if (conn->type == LE_LINK)
2329 		return true;
2330 
2331 	/* Neither local nor remote side had no-bonding as requirement */
2332 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2333 		return true;
2334 
2335 	/* Local side had dedicated bonding as requirement */
2336 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2337 		return true;
2338 
2339 	/* Remote side had dedicated bonding as requirement */
2340 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2341 		return true;
2342 
2343 	/* If none of the above criteria match, then don't store the key
2344 	 * persistently */
2345 	return false;
2346 }
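
/* Worked example of the rules above (illustrative only): assume a BR/EDR
 * pairing where both sides requested general bonding with MITM protection
 * (conn->auth_type == 0x05, conn->remote_auth == 0x05) and the controller
 * reports an unauthenticated combination key (0x04).  The legacy-key,
 * debug-key and changed-combination checks do not apply, but the
 * "neither side asked for no-bonding" rule (both values > 0x01) does, so
 * the key is kept:
 *
 *	if (hci_persistent_key(hdev, conn, 0x04, old_key_type))
 *		; // mgmt will store the new key persistently
 */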
2347 
2348 static u8 ltk_role(u8 type)
2349 {
2350 	if (type == SMP_LTK)
2351 		return HCI_ROLE_MASTER;
2352 
2353 	return HCI_ROLE_SLAVE;
2354 }
2355 
2356 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2357 			     u8 addr_type, u8 role)
2358 {
2359 	struct smp_ltk *k;
2360 
2361 	rcu_read_lock();
2362 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2363 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2364 			continue;
2365 
2366 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2367 			rcu_read_unlock();
2368 			return k;
2369 		}
2370 	}
2371 	rcu_read_unlock();
2372 
2373 	return NULL;
2374 }
2375 
2376 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2377 {
2378 	struct smp_irk *irk;
2379 
2380 	rcu_read_lock();
2381 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2382 		if (!bacmp(&irk->rpa, rpa)) {
2383 			rcu_read_unlock();
2384 			return irk;
2385 		}
2386 	}
2387 
2388 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2389 		if (smp_irk_matches(hdev, irk->val, rpa)) {
2390 			bacpy(&irk->rpa, rpa);
2391 			rcu_read_unlock();
2392 			return irk;
2393 		}
2394 	}
2395 	rcu_read_unlock();
2396 
2397 	return NULL;
2398 }
2399 
2400 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2401 				     u8 addr_type)
2402 {
2403 	struct smp_irk *irk;
2404 
2405 	/* Identity Address must be public or static random */
2406 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2407 		return NULL;
2408 
2409 	rcu_read_lock();
2410 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2411 		if (addr_type == irk->addr_type &&
2412 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
2413 			rcu_read_unlock();
2414 			return irk;
2415 		}
2416 	}
2417 	rcu_read_unlock();
2418 
2419 	return NULL;
2420 }
2421 
2422 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2423 				  bdaddr_t *bdaddr, u8 *val, u8 type,
2424 				  u8 pin_len, bool *persistent)
2425 {
2426 	struct link_key *key, *old_key;
2427 	u8 old_key_type;
2428 
2429 	old_key = hci_find_link_key(hdev, bdaddr);
2430 	if (old_key) {
2431 		old_key_type = old_key->type;
2432 		key = old_key;
2433 	} else {
2434 		old_key_type = conn ? conn->key_type : 0xff;
2435 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2436 		if (!key)
2437 			return NULL;
2438 		list_add_rcu(&key->list, &hdev->link_keys);
2439 	}
2440 
2441 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2442 
2443 	/* Some buggy controller combinations generate a changed
2444 	 * combination key for legacy pairing even when there's no
2445 	 * previous key */
2446 	if (type == HCI_LK_CHANGED_COMBINATION &&
2447 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2448 		type = HCI_LK_COMBINATION;
2449 		if (conn)
2450 			conn->key_type = type;
2451 	}
2452 
2453 	bacpy(&key->bdaddr, bdaddr);
2454 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2455 	key->pin_len = pin_len;
2456 
2457 	if (type == HCI_LK_CHANGED_COMBINATION)
2458 		key->type = old_key_type;
2459 	else
2460 		key->type = type;
2461 
2462 	if (persistent)
2463 		*persistent = hci_persistent_key(hdev, conn, type,
2464 						 old_key_type);
2465 
2466 	return key;
2467 }
2468 
2469 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2470 			    u8 addr_type, u8 type, u8 authenticated,
2471 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2472 {
2473 	struct smp_ltk *key, *old_key;
2474 	u8 role = ltk_role(type);
2475 
2476 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2477 	if (old_key)
2478 		key = old_key;
2479 	else {
2480 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2481 		if (!key)
2482 			return NULL;
2483 		list_add_rcu(&key->list, &hdev->long_term_keys);
2484 	}
2485 
2486 	bacpy(&key->bdaddr, bdaddr);
2487 	key->bdaddr_type = addr_type;
2488 	memcpy(key->val, tk, sizeof(key->val));
2489 	key->authenticated = authenticated;
2490 	key->ediv = ediv;
2491 	key->rand = rand;
2492 	key->enc_size = enc_size;
2493 	key->type = type;
2494 
2495 	return key;
2496 }
2497 
2498 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2499 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
2500 {
2501 	struct smp_irk *irk;
2502 
2503 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2504 	if (!irk) {
2505 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2506 		if (!irk)
2507 			return NULL;
2508 
2509 		bacpy(&irk->bdaddr, bdaddr);
2510 		irk->addr_type = addr_type;
2511 
2512 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2513 	}
2514 
2515 	memcpy(irk->val, val, 16);
2516 	bacpy(&irk->rpa, rpa);
2517 
2518 	return irk;
2519 }
2520 
2521 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2522 {
2523 	struct link_key *key;
2524 
2525 	key = hci_find_link_key(hdev, bdaddr);
2526 	if (!key)
2527 		return -ENOENT;
2528 
2529 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2530 
2531 	list_del_rcu(&key->list);
2532 	kfree_rcu(key, rcu);
2533 
2534 	return 0;
2535 }
2536 
2537 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2538 {
2539 	struct smp_ltk *k;
2540 	int removed = 0;
2541 
2542 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2543 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2544 			continue;
2545 
2546 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2547 
2548 		list_del_rcu(&k->list);
2549 		kfree_rcu(k, rcu);
2550 		removed++;
2551 	}
2552 
2553 	return removed ? 0 : -ENOENT;
2554 }
2555 
2556 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2557 {
2558 	struct smp_irk *k;
2559 
2560 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2561 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2562 			continue;
2563 
2564 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2565 
2566 		list_del_rcu(&k->list);
2567 		kfree_rcu(k, rcu);
2568 	}
2569 }
2570 
2571 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2572 {
2573 	struct smp_ltk *k;
2574 	struct smp_irk *irk;
2575 	u8 addr_type;
2576 
2577 	if (type == BDADDR_BREDR) {
2578 		if (hci_find_link_key(hdev, bdaddr))
2579 			return true;
2580 		return false;
2581 	}
2582 
2583 	/* Convert to HCI addr type which struct smp_ltk uses */
2584 	if (type == BDADDR_LE_PUBLIC)
2585 		addr_type = ADDR_LE_DEV_PUBLIC;
2586 	else
2587 		addr_type = ADDR_LE_DEV_RANDOM;
2588 
2589 	irk = hci_get_irk(hdev, bdaddr, addr_type);
2590 	if (irk) {
2591 		bdaddr = &irk->bdaddr;
2592 		addr_type = irk->addr_type;
2593 	}
2594 
2595 	rcu_read_lock();
2596 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2597 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2598 			rcu_read_unlock();
2599 			return true;
2600 		}
2601 	}
2602 	rcu_read_unlock();
2603 
2604 	return false;
2605 }
2606 
2607 /* HCI command timer function */
2608 static void hci_cmd_timeout(struct work_struct *work)
2609 {
2610 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2611 					    cmd_timer.work);
2612 
2613 	if (hdev->sent_cmd) {
2614 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2615 		u16 opcode = __le16_to_cpu(sent->opcode);
2616 
2617 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2618 	} else {
2619 		bt_dev_err(hdev, "command tx timeout");
2620 	}
2621 
2622 	if (hdev->cmd_timeout)
2623 		hdev->cmd_timeout(hdev);
2624 
2625 	atomic_set(&hdev->cmd_cnt, 1);
2626 	queue_work(hdev->workqueue, &hdev->cmd_work);
2627 }
2628 
2629 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2630 					  bdaddr_t *bdaddr, u8 bdaddr_type)
2631 {
2632 	struct oob_data *data;
2633 
2634 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
2635 		if (bacmp(bdaddr, &data->bdaddr) != 0)
2636 			continue;
2637 		if (data->bdaddr_type != bdaddr_type)
2638 			continue;
2639 		return data;
2640 	}
2641 
2642 	return NULL;
2643 }
2644 
2645 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2646 			       u8 bdaddr_type)
2647 {
2648 	struct oob_data *data;
2649 
2650 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2651 	if (!data)
2652 		return -ENOENT;
2653 
2654 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2655 
2656 	list_del(&data->list);
2657 	kfree(data);
2658 
2659 	return 0;
2660 }
2661 
2662 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2663 {
2664 	struct oob_data *data, *n;
2665 
2666 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2667 		list_del(&data->list);
2668 		kfree(data);
2669 	}
2670 }
2671 
2672 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2673 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
2674 			    u8 *hash256, u8 *rand256)
2675 {
2676 	struct oob_data *data;
2677 
2678 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2679 	if (!data) {
2680 		data = kmalloc(sizeof(*data), GFP_KERNEL);
2681 		if (!data)
2682 			return -ENOMEM;
2683 
2684 		bacpy(&data->bdaddr, bdaddr);
2685 		data->bdaddr_type = bdaddr_type;
2686 		list_add(&data->list, &hdev->remote_oob_data);
2687 	}
2688 
2689 	if (hash192 && rand192) {
2690 		memcpy(data->hash192, hash192, sizeof(data->hash192));
2691 		memcpy(data->rand192, rand192, sizeof(data->rand192));
2692 		if (hash256 && rand256)
2693 			data->present = 0x03;
2694 	} else {
2695 		memset(data->hash192, 0, sizeof(data->hash192));
2696 		memset(data->rand192, 0, sizeof(data->rand192));
2697 		if (hash256 && rand256)
2698 			data->present = 0x02;
2699 		else
2700 			data->present = 0x00;
2701 	}
2702 
2703 	if (hash256 && rand256) {
2704 		memcpy(data->hash256, hash256, sizeof(data->hash256));
2705 		memcpy(data->rand256, rand256, sizeof(data->rand256));
2706 	} else {
2707 		memset(data->hash256, 0, sizeof(data->hash256));
2708 		memset(data->rand256, 0, sizeof(data->rand256));
2709 		if (hash192 && rand192)
2710 			data->present = 0x01;
2711 	}
2712 
2713 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
2714 
2715 	return 0;
2716 }
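
/* The data->present bitmask set above encodes which OOB blocks are valid:
 * 0x01 means P-192 hash/randomizer only, 0x02 means P-256 only, 0x03 both
 * and 0x00 neither.  Illustrative sketch of a caller that only has Secure
 * Connections (P-256) values, e.g. taken from a mgmt Add Remote OOB Data
 * request:
 *
 *	err = hci_add_remote_oob_data(hdev, &bdaddr, BDADDR_BREDR,
 *				      NULL, NULL, hash256, rand256);
 *	// on success the stored entry ends up with present == 0x02
 */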
2717 
2718 /* This function requires the caller holds hdev->lock */
2719 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2720 {
2721 	struct adv_info *adv_instance;
2722 
2723 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2724 		if (adv_instance->instance == instance)
2725 			return adv_instance;
2726 	}
2727 
2728 	return NULL;
2729 }
2730 
2731 /* This function requires the caller holds hdev->lock */
2732 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2733 {
2734 	struct adv_info *cur_instance;
2735 
2736 	cur_instance = hci_find_adv_instance(hdev, instance);
2737 	if (!cur_instance)
2738 		return NULL;
2739 
2740 	if (cur_instance == list_last_entry(&hdev->adv_instances,
2741 					    struct adv_info, list))
2742 		return list_first_entry(&hdev->adv_instances,
2743 						 struct adv_info, list);
2744 	else
2745 		return list_next_entry(cur_instance, list);
2746 }
2747 
2748 /* This function requires the caller holds hdev->lock */
2749 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2750 {
2751 	struct adv_info *adv_instance;
2752 
2753 	adv_instance = hci_find_adv_instance(hdev, instance);
2754 	if (!adv_instance)
2755 		return -ENOENT;
2756 
2757 	BT_DBG("%s removing %dMR", hdev->name, instance);
2758 
2759 	if (hdev->cur_adv_instance == instance) {
2760 		if (hdev->adv_instance_timeout) {
2761 			cancel_delayed_work(&hdev->adv_instance_expire);
2762 			hdev->adv_instance_timeout = 0;
2763 		}
2764 		hdev->cur_adv_instance = 0x00;
2765 	}
2766 
2767 	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2768 
2769 	list_del(&adv_instance->list);
2770 	kfree(adv_instance);
2771 
2772 	hdev->adv_instance_cnt--;
2773 
2774 	return 0;
2775 }
2776 
2777 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2778 {
2779 	struct adv_info *adv_instance, *n;
2780 
2781 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2782 		adv_instance->rpa_expired = rpa_expired;
2783 }
2784 
2785 /* This function requires the caller holds hdev->lock */
2786 void hci_adv_instances_clear(struct hci_dev *hdev)
2787 {
2788 	struct adv_info *adv_instance, *n;
2789 
2790 	if (hdev->adv_instance_timeout) {
2791 		cancel_delayed_work(&hdev->adv_instance_expire);
2792 		hdev->adv_instance_timeout = 0;
2793 	}
2794 
2795 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2796 		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2797 		list_del(&adv_instance->list);
2798 		kfree(adv_instance);
2799 	}
2800 
2801 	hdev->adv_instance_cnt = 0;
2802 	hdev->cur_adv_instance = 0x00;
2803 }
2804 
2805 static void adv_instance_rpa_expired(struct work_struct *work)
2806 {
2807 	struct adv_info *adv_instance = container_of(work, struct adv_info,
2808 						     rpa_expired_cb.work);
2809 
2810 	BT_DBG("");
2811 
2812 	adv_instance->rpa_expired = true;
2813 }
2814 
2815 /* This function requires the caller holds hdev->lock */
2816 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2817 			 u16 adv_data_len, u8 *adv_data,
2818 			 u16 scan_rsp_len, u8 *scan_rsp_data,
2819 			 u16 timeout, u16 duration)
2820 {
2821 	struct adv_info *adv_instance;
2822 
2823 	adv_instance = hci_find_adv_instance(hdev, instance);
2824 	if (adv_instance) {
2825 		memset(adv_instance->adv_data, 0,
2826 		       sizeof(adv_instance->adv_data));
2827 		memset(adv_instance->scan_rsp_data, 0,
2828 		       sizeof(adv_instance->scan_rsp_data));
2829 	} else {
2830 		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2831 		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2832 			return -EOVERFLOW;
2833 
2834 		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2835 		if (!adv_instance)
2836 			return -ENOMEM;
2837 
2838 		adv_instance->pending = true;
2839 		adv_instance->instance = instance;
2840 		list_add(&adv_instance->list, &hdev->adv_instances);
2841 		hdev->adv_instance_cnt++;
2842 	}
2843 
2844 	adv_instance->flags = flags;
2845 	adv_instance->adv_data_len = adv_data_len;
2846 	adv_instance->scan_rsp_len = scan_rsp_len;
2847 
2848 	if (adv_data_len)
2849 		memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2850 
2851 	if (scan_rsp_len)
2852 		memcpy(adv_instance->scan_rsp_data,
2853 		       scan_rsp_data, scan_rsp_len);
2854 
2855 	adv_instance->timeout = timeout;
2856 	adv_instance->remaining_time = timeout;
2857 
2858 	if (duration == 0)
2859 		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2860 	else
2861 		adv_instance->duration = duration;
2862 
2863 	adv_instance->tx_power = HCI_TX_POWER_INVALID;
2864 
2865 	INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2866 			  adv_instance_rpa_expired);
2867 
2868 	BT_DBG("%s for %dMR", hdev->name, instance);
2869 
2870 	return 0;
2871 }
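
/* Illustrative sketch (hypothetical caller, hdev->lock held as required):
 * register advertising instance 1 with a small manufacturer-specific AD
 * element, no scan response data, no timeout and the default duration:
 *
 *	static const u8 ad[] = { 0x03, 0xff, 0x12, 0x34 };	// example AD
 *
 *	err = hci_add_adv_instance(hdev, 0x01, MGMT_ADV_FLAG_CONNECTABLE,
 *				   sizeof(ad), (u8 *)ad, 0, NULL, 0, 0);
 *
 * A duration of 0 falls back to HCI_DEFAULT_ADV_DURATION, while a timeout
 * of 0 means the instance never expires on its own.
 */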
2872 
2873 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2874 					 bdaddr_t *bdaddr, u8 type)
2875 {
2876 	struct bdaddr_list *b;
2877 
2878 	list_for_each_entry(b, bdaddr_list, list) {
2879 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2880 			return b;
2881 	}
2882 
2883 	return NULL;
2884 }
2885 
2886 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2887 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2888 				u8 type)
2889 {
2890 	struct bdaddr_list_with_irk *b;
2891 
2892 	list_for_each_entry(b, bdaddr_list, list) {
2893 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2894 			return b;
2895 	}
2896 
2897 	return NULL;
2898 }
2899 
2900 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2901 {
2902 	struct bdaddr_list *b, *n;
2903 
2904 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2905 		list_del(&b->list);
2906 		kfree(b);
2907 	}
2908 }
2909 
2910 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2911 {
2912 	struct bdaddr_list *entry;
2913 
2914 	if (!bacmp(bdaddr, BDADDR_ANY))
2915 		return -EBADF;
2916 
2917 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2918 		return -EEXIST;
2919 
2920 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2921 	if (!entry)
2922 		return -ENOMEM;
2923 
2924 	bacpy(&entry->bdaddr, bdaddr);
2925 	entry->bdaddr_type = type;
2926 
2927 	list_add(&entry->list, list);
2928 
2929 	return 0;
2930 }
2931 
2932 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2933 					u8 type, u8 *peer_irk, u8 *local_irk)
2934 {
2935 	struct bdaddr_list_with_irk *entry;
2936 
2937 	if (!bacmp(bdaddr, BDADDR_ANY))
2938 		return -EBADF;
2939 
2940 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2941 		return -EEXIST;
2942 
2943 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2944 	if (!entry)
2945 		return -ENOMEM;
2946 
2947 	bacpy(&entry->bdaddr, bdaddr);
2948 	entry->bdaddr_type = type;
2949 
2950 	if (peer_irk)
2951 		memcpy(entry->peer_irk, peer_irk, 16);
2952 
2953 	if (local_irk)
2954 		memcpy(entry->local_irk, local_irk, 16);
2955 
2956 	list_add(&entry->list, list);
2957 
2958 	return 0;
2959 }
2960 
2961 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2962 {
2963 	struct bdaddr_list *entry;
2964 
2965 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2966 		hci_bdaddr_list_clear(list);
2967 		return 0;
2968 	}
2969 
2970 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2971 	if (!entry)
2972 		return -ENOENT;
2973 
2974 	list_del(&entry->list);
2975 	kfree(entry);
2976 
2977 	return 0;
2978 }
2979 
2980 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2981 							u8 type)
2982 {
2983 	struct bdaddr_list_with_irk *entry;
2984 
2985 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2986 		hci_bdaddr_list_clear(list);
2987 		return 0;
2988 	}
2989 
2990 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2991 	if (!entry)
2992 		return -ENOENT;
2993 
2994 	list_del(&entry->list);
2995 	kfree(entry);
2996 
2997 	return 0;
2998 }
2999 
3000 /* This function requires the caller holds hdev->lock */
3001 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3002 					       bdaddr_t *addr, u8 addr_type)
3003 {
3004 	struct hci_conn_params *params;
3005 
3006 	list_for_each_entry(params, &hdev->le_conn_params, list) {
3007 		if (bacmp(&params->addr, addr) == 0 &&
3008 		    params->addr_type == addr_type) {
3009 			return params;
3010 		}
3011 	}
3012 
3013 	return NULL;
3014 }
3015 
3016 /* This function requires the caller holds hdev->lock */
3017 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3018 						  bdaddr_t *addr, u8 addr_type)
3019 {
3020 	struct hci_conn_params *param;
3021 
3022 	list_for_each_entry(param, list, action) {
3023 		if (bacmp(&param->addr, addr) == 0 &&
3024 		    param->addr_type == addr_type)
3025 			return param;
3026 	}
3027 
3028 	return NULL;
3029 }
3030 
3031 /* This function requires the caller holds hdev->lock */
3032 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3033 					    bdaddr_t *addr, u8 addr_type)
3034 {
3035 	struct hci_conn_params *params;
3036 
3037 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3038 	if (params)
3039 		return params;
3040 
3041 	params = kzalloc(sizeof(*params), GFP_KERNEL);
3042 	if (!params) {
3043 		bt_dev_err(hdev, "out of memory");
3044 		return NULL;
3045 	}
3046 
3047 	bacpy(&params->addr, addr);
3048 	params->addr_type = addr_type;
3049 
3050 	list_add(&params->list, &hdev->le_conn_params);
3051 	INIT_LIST_HEAD(&params->action);
3052 
3053 	params->conn_min_interval = hdev->le_conn_min_interval;
3054 	params->conn_max_interval = hdev->le_conn_max_interval;
3055 	params->conn_latency = hdev->le_conn_latency;
3056 	params->supervision_timeout = hdev->le_supv_timeout;
3057 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
3058 
3059 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
3060 
3061 	return params;
3062 }
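
/* Illustrative sketch (hypothetical caller, hdev->lock held): create or
 * reuse parameters for a peer and mark it for automatic connection,
 * roughly what the mgmt Add Device path does:
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (params) {
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *		list_add(&params->action, &hdev->pend_le_conns);
 *		hci_update_background_scan(hdev);
 *	}
 */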
3063 
3064 static void hci_conn_params_free(struct hci_conn_params *params)
3065 {
3066 	if (params->conn) {
3067 		hci_conn_drop(params->conn);
3068 		hci_conn_put(params->conn);
3069 	}
3070 
3071 	list_del(&params->action);
3072 	list_del(&params->list);
3073 	kfree(params);
3074 }
3075 
3076 /* This function requires the caller holds hdev->lock */
3077 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3078 {
3079 	struct hci_conn_params *params;
3080 
3081 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3082 	if (!params)
3083 		return;
3084 
3085 	hci_conn_params_free(params);
3086 
3087 	hci_update_background_scan(hdev);
3088 
3089 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
3090 }
3091 
3092 /* This function requires the caller holds hdev->lock */
3093 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3094 {
3095 	struct hci_conn_params *params, *tmp;
3096 
3097 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3098 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3099 			continue;
3100 
3101 		/* If trying to establish a one-time connection to a disabled
3102 		 * device, leave the params, but mark them as just once.
3103 		 */
3104 		if (params->explicit_connect) {
3105 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3106 			continue;
3107 		}
3108 
3109 		list_del(&params->list);
3110 		kfree(params);
3111 	}
3112 
3113 	BT_DBG("All LE disabled connection parameters were removed");
3114 }
3115 
3116 /* This function requires the caller holds hdev->lock */
3117 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3118 {
3119 	struct hci_conn_params *params, *tmp;
3120 
3121 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3122 		hci_conn_params_free(params);
3123 
3124 	BT_DBG("All LE connection parameters were removed");
3125 }
3126 
3127 /* Copy the Identity Address of the controller.
3128  *
3129  * If the controller has a public BD_ADDR, then by default use that one.
3130  * If this is a LE only controller without a public address, default to
3131  * the static random address.
3132  *
3133  * For debugging purposes it is possible to force controllers with a
3134  * public address to use the static random address instead.
3135  *
3136  * In case BR/EDR has been disabled on a dual-mode controller and
3137  * userspace has configured a static address, then that address
3138  * becomes the identity address instead of the public BR/EDR address.
3139  */
3140 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3141 			       u8 *bdaddr_type)
3142 {
3143 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3144 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3145 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3146 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
3147 		bacpy(bdaddr, &hdev->static_addr);
3148 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
3149 	} else {
3150 		bacpy(bdaddr, &hdev->bdaddr);
3151 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
3152 	}
3153 }
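
/* Worked example of the rules above (illustrative): a dual-mode controller
 * with a valid public BD_ADDR, BR/EDR disabled by user space and a static
 * random address configured reports the static address as its identity:
 *
 *	bdaddr_t id;
 *	u8 id_type;
 *
 *	hci_copy_identity_address(hdev, &id, &id_type);
 *	// id == hdev->static_addr, id_type == ADDR_LE_DEV_RANDOM
 *
 * With BR/EDR still enabled the same controller would instead report its
 * public address with ADDR_LE_DEV_PUBLIC.
 */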
3154 
3155 /* Alloc HCI device */
3156 struct hci_dev *hci_alloc_dev(void)
3157 {
3158 	struct hci_dev *hdev;
3159 
3160 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3161 	if (!hdev)
3162 		return NULL;
3163 
3164 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3165 	hdev->esco_type = (ESCO_HV1);
3166 	hdev->link_mode = (HCI_LM_ACCEPT);
3167 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
3168 	hdev->io_capability = 0x03;	/* No Input No Output */
3169 	hdev->manufacturer = 0xffff;	/* Default to internal use */
3170 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3171 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3172 	hdev->adv_instance_cnt = 0;
3173 	hdev->cur_adv_instance = 0x00;
3174 	hdev->adv_instance_timeout = 0;
3175 
3176 	hdev->sniff_max_interval = 800;
3177 	hdev->sniff_min_interval = 80;
3178 
3179 	hdev->le_adv_channel_map = 0x07;
3180 	hdev->le_adv_min_interval = 0x0800;
3181 	hdev->le_adv_max_interval = 0x0800;
3182 	hdev->le_scan_interval = 0x0060;
3183 	hdev->le_scan_window = 0x0030;
3184 	hdev->le_conn_min_interval = 0x0018;
3185 	hdev->le_conn_max_interval = 0x0028;
3186 	hdev->le_conn_latency = 0x0000;
3187 	hdev->le_supv_timeout = 0x002a;
3188 	hdev->le_def_tx_len = 0x001b;
3189 	hdev->le_def_tx_time = 0x0148;
3190 	hdev->le_max_tx_len = 0x001b;
3191 	hdev->le_max_tx_time = 0x0148;
3192 	hdev->le_max_rx_len = 0x001b;
3193 	hdev->le_max_rx_time = 0x0148;
3194 	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3195 	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3196 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3197 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3198 	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3199 
3200 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3201 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3202 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3203 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3204 	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3205 	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3206 
3207 	mutex_init(&hdev->lock);
3208 	mutex_init(&hdev->req_lock);
3209 
3210 	INIT_LIST_HEAD(&hdev->mgmt_pending);
3211 	INIT_LIST_HEAD(&hdev->blacklist);
3212 	INIT_LIST_HEAD(&hdev->whitelist);
3213 	INIT_LIST_HEAD(&hdev->uuids);
3214 	INIT_LIST_HEAD(&hdev->link_keys);
3215 	INIT_LIST_HEAD(&hdev->long_term_keys);
3216 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3217 	INIT_LIST_HEAD(&hdev->remote_oob_data);
3218 	INIT_LIST_HEAD(&hdev->le_white_list);
3219 	INIT_LIST_HEAD(&hdev->le_resolv_list);
3220 	INIT_LIST_HEAD(&hdev->le_conn_params);
3221 	INIT_LIST_HEAD(&hdev->pend_le_conns);
3222 	INIT_LIST_HEAD(&hdev->pend_le_reports);
3223 	INIT_LIST_HEAD(&hdev->conn_hash.list);
3224 	INIT_LIST_HEAD(&hdev->adv_instances);
3225 
3226 	INIT_WORK(&hdev->rx_work, hci_rx_work);
3227 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3228 	INIT_WORK(&hdev->tx_work, hci_tx_work);
3229 	INIT_WORK(&hdev->power_on, hci_power_on);
3230 	INIT_WORK(&hdev->error_reset, hci_error_reset);
3231 
3232 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3233 
3234 	skb_queue_head_init(&hdev->rx_q);
3235 	skb_queue_head_init(&hdev->cmd_q);
3236 	skb_queue_head_init(&hdev->raw_q);
3237 
3238 	init_waitqueue_head(&hdev->req_wait_q);
3239 
3240 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3241 
3242 	hci_request_setup(hdev);
3243 
3244 	hci_init_sysfs(hdev);
3245 	discovery_init(hdev);
3246 
3247 	return hdev;
3248 }
3249 EXPORT_SYMBOL(hci_alloc_dev);
3250 
3251 /* Free HCI device */
3252 void hci_free_dev(struct hci_dev *hdev)
3253 {
3254 	/* will free via device release */
3255 	put_device(&hdev->dev);
3256 }
3257 EXPORT_SYMBOL(hci_free_dev);
3258 
3259 /* Register HCI device */
3260 int hci_register_dev(struct hci_dev *hdev)
3261 {
3262 	int id, error;
3263 
3264 	if (!hdev->open || !hdev->close || !hdev->send)
3265 		return -EINVAL;
3266 
3267 	/* Do not allow HCI_AMP devices to register at index 0,
3268 	 * so the index can be used as the AMP controller ID.
3269 	 */
3270 	switch (hdev->dev_type) {
3271 	case HCI_PRIMARY:
3272 		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3273 		break;
3274 	case HCI_AMP:
3275 		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3276 		break;
3277 	default:
3278 		return -EINVAL;
3279 	}
3280 
3281 	if (id < 0)
3282 		return id;
3283 
3284 	sprintf(hdev->name, "hci%d", id);
3285 	hdev->id = id;
3286 
3287 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3288 
3289 	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3290 	if (!hdev->workqueue) {
3291 		error = -ENOMEM;
3292 		goto err;
3293 	}
3294 
3295 	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3296 						      hdev->name);
3297 	if (!hdev->req_workqueue) {
3298 		destroy_workqueue(hdev->workqueue);
3299 		error = -ENOMEM;
3300 		goto err;
3301 	}
3302 
3303 	if (!IS_ERR_OR_NULL(bt_debugfs))
3304 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3305 
3306 	dev_set_name(&hdev->dev, "%s", hdev->name);
3307 
3308 	error = device_add(&hdev->dev);
3309 	if (error < 0)
3310 		goto err_wqueue;
3311 
3312 	hci_leds_init(hdev);
3313 
3314 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3315 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3316 				    hdev);
3317 	if (hdev->rfkill) {
3318 		if (rfkill_register(hdev->rfkill) < 0) {
3319 			rfkill_destroy(hdev->rfkill);
3320 			hdev->rfkill = NULL;
3321 		}
3322 	}
3323 
3324 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3325 		hci_dev_set_flag(hdev, HCI_RFKILLED);
3326 
3327 	hci_dev_set_flag(hdev, HCI_SETUP);
3328 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3329 
3330 	if (hdev->dev_type == HCI_PRIMARY) {
3331 		/* Assume BR/EDR support until proven otherwise (such as
3332 		 * through reading supported features during init).
3333 		 */
3334 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3335 	}
3336 
3337 	write_lock(&hci_dev_list_lock);
3338 	list_add(&hdev->list, &hci_dev_list);
3339 	write_unlock(&hci_dev_list_lock);
3340 
3341 	/* Devices that are marked for raw-only usage are unconfigured
3342 	 * and should not be included in normal operation.
3343 	 */
3344 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3345 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3346 
3347 	hci_sock_dev_event(hdev, HCI_DEV_REG);
3348 	hci_dev_hold(hdev);
3349 
3350 	queue_work(hdev->req_workqueue, &hdev->power_on);
3351 
3352 	return id;
3353 
3354 err_wqueue:
3355 	destroy_workqueue(hdev->workqueue);
3356 	destroy_workqueue(hdev->req_workqueue);
3357 err:
3358 	ida_simple_remove(&hci_index_ida, hdev->id);
3359 
3360 	return error;
3361 }
3362 EXPORT_SYMBOL(hci_register_dev);
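
/* Illustrative sketch of the driver side (hypothetical transport driver;
 * the my_* names are not part of this file): a probe routine allocates
 * the device, wires up the mandatory callbacks and registers it, after
 * which the power_on work queued above brings the controller up:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hci_set_drvdata(hdev, my_data);
 *	hdev->open  = my_open;	// open, close and send are required
 *	hdev->close = my_close;	// by hci_register_dev()
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 */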
3363 
3364 /* Unregister HCI device */
3365 void hci_unregister_dev(struct hci_dev *hdev)
3366 {
3367 	int id;
3368 
3369 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3370 
3371 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
3372 
3373 	id = hdev->id;
3374 
3375 	write_lock(&hci_dev_list_lock);
3376 	list_del(&hdev->list);
3377 	write_unlock(&hci_dev_list_lock);
3378 
3379 	cancel_work_sync(&hdev->power_on);
3380 
3381 	hci_dev_do_close(hdev);
3382 
3383 	if (!test_bit(HCI_INIT, &hdev->flags) &&
3384 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
3385 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3386 		hci_dev_lock(hdev);
3387 		mgmt_index_removed(hdev);
3388 		hci_dev_unlock(hdev);
3389 	}
3390 
3391 	/* mgmt_index_removed should take care of emptying the
3392 	 * pending list */
3393 	BUG_ON(!list_empty(&hdev->mgmt_pending));
3394 
3395 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3396 
3397 	if (hdev->rfkill) {
3398 		rfkill_unregister(hdev->rfkill);
3399 		rfkill_destroy(hdev->rfkill);
3400 	}
3401 
3402 	device_del(&hdev->dev);
3403 
3404 	debugfs_remove_recursive(hdev->debugfs);
3405 	kfree_const(hdev->hw_info);
3406 	kfree_const(hdev->fw_info);
3407 
3408 	destroy_workqueue(hdev->workqueue);
3409 	destroy_workqueue(hdev->req_workqueue);
3410 
3411 	hci_dev_lock(hdev);
3412 	hci_bdaddr_list_clear(&hdev->blacklist);
3413 	hci_bdaddr_list_clear(&hdev->whitelist);
3414 	hci_uuids_clear(hdev);
3415 	hci_link_keys_clear(hdev);
3416 	hci_smp_ltks_clear(hdev);
3417 	hci_smp_irks_clear(hdev);
3418 	hci_remote_oob_data_clear(hdev);
3419 	hci_adv_instances_clear(hdev);
3420 	hci_bdaddr_list_clear(&hdev->le_white_list);
3421 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
3422 	hci_conn_params_clear_all(hdev);
3423 	hci_discovery_filter_clear(hdev);
3424 	hci_dev_unlock(hdev);
3425 
3426 	hci_dev_put(hdev);
3427 
3428 	ida_simple_remove(&hci_index_ida, id);
3429 }
3430 EXPORT_SYMBOL(hci_unregister_dev);
3431 
3432 /* Suspend HCI device */
3433 int hci_suspend_dev(struct hci_dev *hdev)
3434 {
3435 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3436 	return 0;
3437 }
3438 EXPORT_SYMBOL(hci_suspend_dev);
3439 
3440 /* Resume HCI device */
3441 int hci_resume_dev(struct hci_dev *hdev)
3442 {
3443 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3444 	return 0;
3445 }
3446 EXPORT_SYMBOL(hci_resume_dev);
3447 
3448 /* Reset HCI device */
3449 int hci_reset_dev(struct hci_dev *hdev)
3450 {
3451 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3452 	struct sk_buff *skb;
3453 
3454 	skb = bt_skb_alloc(3, GFP_ATOMIC);
3455 	if (!skb)
3456 		return -ENOMEM;
3457 
3458 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3459 	skb_put_data(skb, hw_err, 3);
3460 
3461 	/* Send Hardware Error to upper stack */
3462 	return hci_recv_frame(hdev, skb);
3463 }
3464 EXPORT_SYMBOL(hci_reset_dev);
3465 
3466 /* Receive frame from HCI drivers */
3467 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3468 {
3469 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3470 		      && !test_bit(HCI_INIT, &hdev->flags))) {
3471 		kfree_skb(skb);
3472 		return -ENXIO;
3473 	}
3474 
3475 	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3476 	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3477 	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3478 		kfree_skb(skb);
3479 		return -EINVAL;
3480 	}
3481 
3482 	/* Incoming skb */
3483 	bt_cb(skb)->incoming = 1;
3484 
3485 	/* Time stamp */
3486 	__net_timestamp(skb);
3487 
3488 	skb_queue_tail(&hdev->rx_q, skb);
3489 	queue_work(hdev->workqueue, &hdev->rx_work);
3490 
3491 	return 0;
3492 }
3493 EXPORT_SYMBOL(hci_recv_frame);
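
/* Illustrative sketch (hypothetical driver RX path): a transport driver
 * that has reassembled a complete HCI event from the wire tags the skb
 * with its packet type and hands it over; hci_recv_frame() takes
 * ownership of the skb in both the success and the error case:
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 *	// skb must not be touched after this point
 */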
3494 
3495 /* Receive diagnostic message from HCI drivers */
3496 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3497 {
3498 	/* Mark as diagnostic packet */
3499 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3500 
3501 	/* Time stamp */
3502 	__net_timestamp(skb);
3503 
3504 	skb_queue_tail(&hdev->rx_q, skb);
3505 	queue_work(hdev->workqueue, &hdev->rx_work);
3506 
3507 	return 0;
3508 }
3509 EXPORT_SYMBOL(hci_recv_diag);
3510 
3511 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3512 {
3513 	va_list vargs;
3514 
3515 	va_start(vargs, fmt);
3516 	kfree_const(hdev->hw_info);
3517 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3518 	va_end(vargs);
3519 }
3520 EXPORT_SYMBOL(hci_set_hw_info);
3521 
3522 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3523 {
3524 	va_list vargs;
3525 
3526 	va_start(vargs, fmt);
3527 	kfree_const(hdev->fw_info);
3528 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3529 	va_end(vargs);
3530 }
3531 EXPORT_SYMBOL(hci_set_fw_info);
3532 
3533 /* ---- Interface to upper protocols ---- */
3534 
3535 int hci_register_cb(struct hci_cb *cb)
3536 {
3537 	BT_DBG("%p name %s", cb, cb->name);
3538 
3539 	mutex_lock(&hci_cb_list_lock);
3540 	list_add_tail(&cb->list, &hci_cb_list);
3541 	mutex_unlock(&hci_cb_list_lock);
3542 
3543 	return 0;
3544 }
3545 EXPORT_SYMBOL(hci_register_cb);
3546 
3547 int hci_unregister_cb(struct hci_cb *cb)
3548 {
3549 	BT_DBG("%p name %s", cb, cb->name);
3550 
3551 	mutex_lock(&hci_cb_list_lock);
3552 	list_del(&cb->list);
3553 	mutex_unlock(&hci_cb_list_lock);
3554 
3555 	return 0;
3556 }
3557 EXPORT_SYMBOL(hci_unregister_cb);
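
/* Illustrative sketch: an upper protocol provides a struct hci_cb with
 * the connection callbacks it cares about and registers it once globally,
 * much as L2CAP and SCO do at module init (my_* names are hypothetical):
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_connect_cfm,
 *		.disconn_cfm	= my_disconn_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);	// on init
 *	hci_unregister_cb(&my_cb);	// on exit
 */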
3558 
3559 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3560 {
3561 	int err;
3562 
3563 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3564 	       skb->len);
3565 
3566 	/* Time stamp */
3567 	__net_timestamp(skb);
3568 
3569 	/* Send copy to monitor */
3570 	hci_send_to_monitor(hdev, skb);
3571 
3572 	if (atomic_read(&hdev->promisc)) {
3573 		/* Send copy to the sockets */
3574 		hci_send_to_sock(hdev, skb);
3575 	}
3576 
3577 	/* Get rid of skb owner, prior to sending to the driver. */
3578 	skb_orphan(skb);
3579 
3580 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3581 		kfree_skb(skb);
3582 		return;
3583 	}
3584 
3585 	err = hdev->send(hdev, skb);
3586 	if (err < 0) {
3587 		bt_dev_err(hdev, "sending frame failed (%d)", err);
3588 		kfree_skb(skb);
3589 	}
3590 }
3591 
3592 /* Send HCI command */
3593 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3594 		 const void *param)
3595 {
3596 	struct sk_buff *skb;
3597 
3598 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3599 
3600 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3601 	if (!skb) {
3602 		bt_dev_err(hdev, "no memory for command");
3603 		return -ENOMEM;
3604 	}
3605 
3606 	/* Stand-alone HCI commands must be flagged as
3607 	 * single-command requests.
3608 	 */
3609 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3610 
3611 	skb_queue_tail(&hdev->cmd_q, skb);
3612 	queue_work(hdev->workqueue, &hdev->cmd_work);
3613 
3614 	return 0;
3615 }
3616 
3617 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3618 		   const void *param)
3619 {
3620 	struct sk_buff *skb;
3621 
3622 	if (hci_opcode_ogf(opcode) != 0x3f) {
3623 		/* A controller receiving a command shall respond with either
3624 		 * a Command Status Event or a Command Complete Event.
3625 		 * Therefore, all standard HCI commands must be sent via the
3626 		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3627 		 * Some vendors do not comply with this rule for vendor-specific
3628 		 * commands and do not return any event. We want to support
3629 		 * unresponded commands for such cases only.
3630 		 */
3631 		bt_dev_err(hdev, "unresponded command not supported");
3632 		return -EINVAL;
3633 	}
3634 
3635 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3636 	if (!skb) {
3637 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3638 			   opcode);
3639 		return -ENOMEM;
3640 	}
3641 
3642 	hci_send_frame(hdev, skb);
3643 
3644 	return 0;
3645 }
3646 EXPORT_SYMBOL(__hci_cmd_send);
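
/* Usage sketch (illustrative only): sending a vendor-specific command that is
 * known not to generate any Command Complete/Status event. The OCF 0x0001 and
 * the one-byte payload are made-up values; real drivers must use the opcodes
 * and parameters defined by their vendor.
 *
 *	u8 param = 0x01;
 *
 *	__hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *		       sizeof(param), &param);
 */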
3647 
3648 /* Get data from the previously sent command */
3649 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3650 {
3651 	struct hci_command_hdr *hdr;
3652 
3653 	if (!hdev->sent_cmd)
3654 		return NULL;
3655 
3656 	hdr = (void *) hdev->sent_cmd->data;
3657 
3658 	if (hdr->opcode != cpu_to_le16(opcode))
3659 		return NULL;
3660 
3661 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3662 
3663 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3664 }
3665 
3666 /* Send HCI command and wait for command complete event */
3667 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3668 			     const void *param, u32 timeout)
3669 {
3670 	struct sk_buff *skb;
3671 
3672 	if (!test_bit(HCI_UP, &hdev->flags))
3673 		return ERR_PTR(-ENETDOWN);
3674 
3675 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3676 
3677 	hci_req_sync_lock(hdev);
3678 	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3679 	hci_req_sync_unlock(hdev);
3680 
3681 	return skb;
3682 }
3683 EXPORT_SYMBOL(hci_cmd_sync);
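
/* Usage sketch (illustrative only): reading the local version information
 * from a context that may sleep, assuming the device is up.
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... parse skb->data as struct hci_rp_read_local_version ...
 *
 *	kfree_skb(skb);
 */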
3684 
3685 /* Send ACL data */
3686 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3687 {
3688 	struct hci_acl_hdr *hdr;
3689 	int len = skb->len;
3690 
3691 	skb_push(skb, HCI_ACL_HDR_SIZE);
3692 	skb_reset_transport_header(skb);
3693 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3694 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3695 	hdr->dlen   = cpu_to_le16(len);
3696 }
3697 
3698 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3699 			  struct sk_buff *skb, __u16 flags)
3700 {
3701 	struct hci_conn *conn = chan->conn;
3702 	struct hci_dev *hdev = conn->hdev;
3703 	struct sk_buff *list;
3704 
3705 	skb->len = skb_headlen(skb);
3706 	skb->data_len = 0;
3707 
3708 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3709 
3710 	switch (hdev->dev_type) {
3711 	case HCI_PRIMARY:
3712 		hci_add_acl_hdr(skb, conn->handle, flags);
3713 		break;
3714 	case HCI_AMP:
3715 		hci_add_acl_hdr(skb, chan->handle, flags);
3716 		break;
3717 	default:
3718 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3719 		return;
3720 	}
3721 
3722 	list = skb_shinfo(skb)->frag_list;
3723 	if (!list) {
3724 		/* Non-fragmented */
3725 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3726 
3727 		skb_queue_tail(queue, skb);
3728 	} else {
3729 		/* Fragmented */
3730 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3731 
3732 		skb_shinfo(skb)->frag_list = NULL;
3733 
3734 		/* Queue all fragments atomically. We need to use spin_lock_bh
3735 		 * here because of 6LoWPAN links, as there this function is
3736 		 * called from softirq and using normal spin lock could cause
3737 		 * deadlocks.
3738 		 */
3739 		spin_lock_bh(&queue->lock);
3740 
3741 		__skb_queue_tail(queue, skb);
3742 
3743 		flags &= ~ACL_START;
3744 		flags |= ACL_CONT;
3745 		do {
3746 			skb = list; list = list->next;
3747 
3748 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3749 			hci_add_acl_hdr(skb, conn->handle, flags);
3750 
3751 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3752 
3753 			__skb_queue_tail(queue, skb);
3754 		} while (list);
3755 
3756 		spin_unlock_bh(&queue->lock);
3757 	}
3758 }
3759 
3760 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3761 {
3762 	struct hci_dev *hdev = chan->conn->hdev;
3763 
3764 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3765 
3766 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3767 
3768 	queue_work(hdev->workqueue, &hdev->tx_work);
3769 }
3770 
3771 /* Send SCO data */
3772 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3773 {
3774 	struct hci_dev *hdev = conn->hdev;
3775 	struct hci_sco_hdr hdr;
3776 
3777 	BT_DBG("%s len %d", hdev->name, skb->len);
3778 
3779 	hdr.handle = cpu_to_le16(conn->handle);
3780 	hdr.dlen   = skb->len;
3781 
3782 	skb_push(skb, HCI_SCO_HDR_SIZE);
3783 	skb_reset_transport_header(skb);
3784 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3785 
3786 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3787 
3788 	skb_queue_tail(&conn->data_q, skb);
3789 	queue_work(hdev->workqueue, &hdev->tx_work);
3790 }
3791 
3792 /* ---- HCI TX task (outgoing data) ---- */
3793 
3794 /* HCI Connection scheduler */
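/* Pick the connection of the given link type with the fewest packets still
 * outstanding on the controller (simple fairness) and compute a quote: how
 * many packets that connection may send in this pass, derived from the
 * controller's free buffer count divided by the number of busy connections.
 */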
3795 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3796 				     int *quote)
3797 {
3798 	struct hci_conn_hash *h = &hdev->conn_hash;
3799 	struct hci_conn *conn = NULL, *c;
3800 	unsigned int num = 0, min = ~0;
3801 
3802 	/* We don't have to lock the device here. Connections are always
3803 	 * added and removed with the TX task disabled. */
3804 
3805 	rcu_read_lock();
3806 
3807 	list_for_each_entry_rcu(c, &h->list, list) {
3808 		if (c->type != type || skb_queue_empty(&c->data_q))
3809 			continue;
3810 
3811 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3812 			continue;
3813 
3814 		num++;
3815 
3816 		if (c->sent < min) {
3817 			min  = c->sent;
3818 			conn = c;
3819 		}
3820 
3821 		if (hci_conn_num(hdev, type) == num)
3822 			break;
3823 	}
3824 
3825 	rcu_read_unlock();
3826 
3827 	if (conn) {
3828 		int cnt, q;
3829 
3830 		switch (conn->type) {
3831 		case ACL_LINK:
3832 			cnt = hdev->acl_cnt;
3833 			break;
3834 		case SCO_LINK:
3835 		case ESCO_LINK:
3836 			cnt = hdev->sco_cnt;
3837 			break;
3838 		case LE_LINK:
3839 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3840 			break;
3841 		default:
3842 			cnt = 0;
3843 			bt_dev_err(hdev, "unknown link type %d", conn->type);
3844 		}
3845 
3846 		q = cnt / num;
3847 		*quote = q ? q : 1;
3848 	} else
3849 		*quote = 0;
3850 
3851 	BT_DBG("conn %p quote %d", conn, *quote);
3852 	return conn;
3853 }
3854 
3855 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3856 {
3857 	struct hci_conn_hash *h = &hdev->conn_hash;
3858 	struct hci_conn *c;
3859 
3860 	bt_dev_err(hdev, "link tx timeout");
3861 
3862 	rcu_read_lock();
3863 
3864 	/* Kill stalled connections */
3865 	list_for_each_entry_rcu(c, &h->list, list) {
3866 		if (c->type == type && c->sent) {
3867 			bt_dev_err(hdev, "killing stalled connection %pMR",
3868 				   &c->dst);
3869 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3870 		}
3871 	}
3872 
3873 	rcu_read_unlock();
3874 }
3875 
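/* Channel-level counterpart of hci_low_sent(): among all connections of the
 * given link type, pick the channel whose queued data has the highest
 * priority, preferring the connection with the fewest outstanding packets on
 * a tie, and compute a per-pass quote from the free buffer count.
 */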
3876 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3877 				      int *quote)
3878 {
3879 	struct hci_conn_hash *h = &hdev->conn_hash;
3880 	struct hci_chan *chan = NULL;
3881 	unsigned int num = 0, min = ~0, cur_prio = 0;
3882 	struct hci_conn *conn;
3883 	int cnt, q, conn_num = 0;
3884 
3885 	BT_DBG("%s", hdev->name);
3886 
3887 	rcu_read_lock();
3888 
3889 	list_for_each_entry_rcu(conn, &h->list, list) {
3890 		struct hci_chan *tmp;
3891 
3892 		if (conn->type != type)
3893 			continue;
3894 
3895 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3896 			continue;
3897 
3898 		conn_num++;
3899 
3900 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3901 			struct sk_buff *skb;
3902 
3903 			if (skb_queue_empty(&tmp->data_q))
3904 				continue;
3905 
3906 			skb = skb_peek(&tmp->data_q);
3907 			if (skb->priority < cur_prio)
3908 				continue;
3909 
3910 			if (skb->priority > cur_prio) {
3911 				num = 0;
3912 				min = ~0;
3913 				cur_prio = skb->priority;
3914 			}
3915 
3916 			num++;
3917 
3918 			if (conn->sent < min) {
3919 				min  = conn->sent;
3920 				chan = tmp;
3921 			}
3922 		}
3923 
3924 		if (hci_conn_num(hdev, type) == conn_num)
3925 			break;
3926 	}
3927 
3928 	rcu_read_unlock();
3929 
3930 	if (!chan)
3931 		return NULL;
3932 
3933 	switch (chan->conn->type) {
3934 	case ACL_LINK:
3935 		cnt = hdev->acl_cnt;
3936 		break;
3937 	case AMP_LINK:
3938 		cnt = hdev->block_cnt;
3939 		break;
3940 	case SCO_LINK:
3941 	case ESCO_LINK:
3942 		cnt = hdev->sco_cnt;
3943 		break;
3944 	case LE_LINK:
3945 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3946 		break;
3947 	default:
3948 		cnt = 0;
3949 		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3950 	}
3951 
3952 	q = cnt / num;
3953 	*quote = q ? q : 1;
3954 	BT_DBG("chan %p quote %d", chan, *quote);
3955 	return chan;
3956 }
3957 
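/* After a scheduling pass, promote the head packet of every channel that sent
 * nothing to just below the maximum priority, so that low-priority traffic is
 * not starved indefinitely by higher-priority channels.
 */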
3958 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3959 {
3960 	struct hci_conn_hash *h = &hdev->conn_hash;
3961 	struct hci_conn *conn;
3962 	int num = 0;
3963 
3964 	BT_DBG("%s", hdev->name);
3965 
3966 	rcu_read_lock();
3967 
3968 	list_for_each_entry_rcu(conn, &h->list, list) {
3969 		struct hci_chan *chan;
3970 
3971 		if (conn->type != type)
3972 			continue;
3973 
3974 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3975 			continue;
3976 
3977 		num++;
3978 
3979 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3980 			struct sk_buff *skb;
3981 
3982 			if (chan->sent) {
3983 				chan->sent = 0;
3984 				continue;
3985 			}
3986 
3987 			if (skb_queue_empty(&chan->data_q))
3988 				continue;
3989 
3990 			skb = skb_peek(&chan->data_q);
3991 			if (skb->priority >= HCI_PRIO_MAX - 1)
3992 				continue;
3993 
3994 			skb->priority = HCI_PRIO_MAX - 1;
3995 
3996 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3997 			       skb->priority);
3998 		}
3999 
4000 		if (hci_conn_num(hdev, type) == num)
4001 			break;
4002 	}
4003 
4004 	rcu_read_unlock();
4005 
4006 }
4007 
4008 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4009 {
4010 	/* Calculate count of blocks used by this packet */
4011 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4012 }
4013 
4014 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4015 {
4016 	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4017 		/* ACL tx timeout must be longer than maximum
4018 		 * link supervision timeout (40.9 seconds) */
4019 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4020 				       HCI_ACL_TX_TIMEOUT))
4021 			hci_link_tx_to(hdev, ACL_LINK);
4022 	}
4023 }
4024 
4025 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4026 {
4027 	unsigned int cnt = hdev->acl_cnt;
4028 	struct hci_chan *chan;
4029 	struct sk_buff *skb;
4030 	int quote;
4031 
4032 	__check_timeout(hdev, cnt);
4033 
4034 	while (hdev->acl_cnt &&
4035 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4036 		u32 priority = (skb_peek(&chan->data_q))->priority;
4037 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
4038 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4039 			       skb->len, skb->priority);
4040 
4041 			/* Stop if priority has changed */
4042 			if (skb->priority < priority)
4043 				break;
4044 
4045 			skb = skb_dequeue(&chan->data_q);
4046 
4047 			hci_conn_enter_active_mode(chan->conn,
4048 						   bt_cb(skb)->force_active);
4049 
4050 			hci_send_frame(hdev, skb);
4051 			hdev->acl_last_tx = jiffies;
4052 
4053 			hdev->acl_cnt--;
4054 			chan->sent++;
4055 			chan->conn->sent++;
4056 		}
4057 	}
4058 
4059 	if (cnt != hdev->acl_cnt)
4060 		hci_prio_recalculate(hdev, ACL_LINK);
4061 }
4062 
4063 static void hci_sched_acl_blk(struct hci_dev *hdev)
4064 {
4065 	unsigned int cnt = hdev->block_cnt;
4066 	struct hci_chan *chan;
4067 	struct sk_buff *skb;
4068 	int quote;
4069 	u8 type;
4070 
4071 	__check_timeout(hdev, cnt);
4072 
4073 	BT_DBG("%s", hdev->name);
4074 
4075 	if (hdev->dev_type == HCI_AMP)
4076 		type = AMP_LINK;
4077 	else
4078 		type = ACL_LINK;
4079 
4080 	while (hdev->block_cnt > 0 &&
4081 	       (chan = hci_chan_sent(hdev, type, &quote))) {
4082 		u32 priority = (skb_peek(&chan->data_q))->priority;
4083 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4084 			int blocks;
4085 
4086 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4087 			       skb->len, skb->priority);
4088 
4089 			/* Stop if priority has changed */
4090 			if (skb->priority < priority)
4091 				break;
4092 
4093 			skb = skb_dequeue(&chan->data_q);
4094 
4095 			blocks = __get_blocks(hdev, skb);
4096 			if (blocks > hdev->block_cnt)
4097 				return;
4098 
4099 			hci_conn_enter_active_mode(chan->conn,
4100 						   bt_cb(skb)->force_active);
4101 
4102 			hci_send_frame(hdev, skb);
4103 			hdev->acl_last_tx = jiffies;
4104 
4105 			hdev->block_cnt -= blocks;
4106 			quote -= blocks;
4107 
4108 			chan->sent += blocks;
4109 			chan->conn->sent += blocks;
4110 		}
4111 	}
4112 
4113 	if (cnt != hdev->block_cnt)
4114 		hci_prio_recalculate(hdev, type);
4115 }
4116 
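/* Dispatch ACL scheduling according to the controller's flow control mode:
 * packet-based controllers account for individual ACL packets (acl_cnt),
 * while block-based controllers (e.g. AMP) account for data blocks
 * (block_cnt).
 */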
4117 static void hci_sched_acl(struct hci_dev *hdev)
4118 {
4119 	BT_DBG("%s", hdev->name);
4120 
4121 	/* No ACL link over BR/EDR controller */
4122 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4123 		return;
4124 
4125 	/* No AMP link over AMP controller */
4126 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4127 		return;
4128 
4129 	switch (hdev->flow_ctl_mode) {
4130 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
4131 		hci_sched_acl_pkt(hdev);
4132 		break;
4133 
4134 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4135 		hci_sched_acl_blk(hdev);
4136 		break;
4137 	}
4138 }
4139 
4140 /* Schedule SCO */
4141 static void hci_sched_sco(struct hci_dev *hdev)
4142 {
4143 	struct hci_conn *conn;
4144 	struct sk_buff *skb;
4145 	int quote;
4146 
4147 	BT_DBG("%s", hdev->name);
4148 
4149 	if (!hci_conn_num(hdev, SCO_LINK))
4150 		return;
4151 
4152 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4153 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4154 			BT_DBG("skb %p len %d", skb, skb->len);
4155 			hci_send_frame(hdev, skb);
4156 
4157 			conn->sent++;
4158 			if (conn->sent == ~0)
4159 				conn->sent = 0;
4160 		}
4161 	}
4162 }
4163 
4164 static void hci_sched_esco(struct hci_dev *hdev)
4165 {
4166 	struct hci_conn *conn;
4167 	struct sk_buff *skb;
4168 	int quote;
4169 
4170 	BT_DBG("%s", hdev->name);
4171 
4172 	if (!hci_conn_num(hdev, ESCO_LINK))
4173 		return;
4174 
4175 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4176 						     &quote))) {
4177 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4178 			BT_DBG("skb %p len %d", skb, skb->len);
4179 			hci_send_frame(hdev, skb);
4180 
4181 			conn->sent++;
4182 			if (conn->sent == ~0)
4183 				conn->sent = 0;
4184 		}
4185 	}
4186 }
4187 
4188 static void hci_sched_le(struct hci_dev *hdev)
4189 {
4190 	struct hci_chan *chan;
4191 	struct sk_buff *skb;
4192 	int quote, cnt, tmp;
4193 
4194 	BT_DBG("%s", hdev->name);
4195 
4196 	if (!hci_conn_num(hdev, LE_LINK))
4197 		return;
4198 
4199 	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4200 		/* LE tx timeout must be longer than maximum
4201 		 * link supervision timeout (40.9 seconds) */
4202 		if (!hdev->le_cnt && hdev->le_pkts &&
4203 		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
4204 			hci_link_tx_to(hdev, LE_LINK);
4205 	}
4206 
4207 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4208 	tmp = cnt;
4209 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4210 		u32 priority = (skb_peek(&chan->data_q))->priority;
4211 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
4212 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4213 			       skb->len, skb->priority);
4214 
4215 			/* Stop if priority has changed */
4216 			if (skb->priority < priority)
4217 				break;
4218 
4219 			skb = skb_dequeue(&chan->data_q);
4220 
4221 			hci_send_frame(hdev, skb);
4222 			hdev->le_last_tx = jiffies;
4223 
4224 			cnt--;
4225 			chan->sent++;
4226 			chan->conn->sent++;
4227 		}
4228 	}
4229 
4230 	if (hdev->le_pkts)
4231 		hdev->le_cnt = cnt;
4232 	else
4233 		hdev->acl_cnt = cnt;
4234 
4235 	if (cnt != tmp)
4236 		hci_prio_recalculate(hdev, LE_LINK);
4237 }
4238 
4239 static void hci_tx_work(struct work_struct *work)
4240 {
4241 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4242 	struct sk_buff *skb;
4243 
4244 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4245 	       hdev->sco_cnt, hdev->le_cnt);
4246 
4247 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4248 		/* Schedule queues and send stuff to HCI driver */
4249 		hci_sched_acl(hdev);
4250 		hci_sched_sco(hdev);
4251 		hci_sched_esco(hdev);
4252 		hci_sched_le(hdev);
4253 	}
4254 
4255 	/* Send next queued raw (unknown type) packet */
4256 	while ((skb = skb_dequeue(&hdev->raw_q)))
4257 		hci_send_frame(hdev, skb);
4258 }
4259 
4260 /* ----- HCI RX task (incoming data processing) ----- */
4261 
4262 /* ACL data packet */
4263 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4264 {
4265 	struct hci_acl_hdr *hdr = (void *) skb->data;
4266 	struct hci_conn *conn;
4267 	__u16 handle, flags;
4268 
4269 	skb_pull(skb, HCI_ACL_HDR_SIZE);
4270 
4271 	handle = __le16_to_cpu(hdr->handle);
4272 	flags  = hci_flags(handle);
4273 	handle = hci_handle(handle);
4274 
4275 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4276 	       handle, flags);
4277 
4278 	hdev->stat.acl_rx++;
4279 
4280 	hci_dev_lock(hdev);
4281 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4282 	hci_dev_unlock(hdev);
4283 
4284 	if (conn) {
4285 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4286 
4287 		/* Send to upper protocol */
4288 		l2cap_recv_acldata(conn, skb, flags);
4289 		return;
4290 	} else {
4291 		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4292 			   handle);
4293 	}
4294 
4295 	kfree_skb(skb);
4296 }
4297 
4298 /* SCO data packet */
4299 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4300 {
4301 	struct hci_sco_hdr *hdr = (void *) skb->data;
4302 	struct hci_conn *conn;
4303 	__u16 handle;
4304 
4305 	skb_pull(skb, HCI_SCO_HDR_SIZE);
4306 
4307 	handle = __le16_to_cpu(hdr->handle);
4308 
4309 	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4310 
4311 	hdev->stat.sco_rx++;
4312 
4313 	hci_dev_lock(hdev);
4314 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4315 	hci_dev_unlock(hdev);
4316 
4317 	if (conn) {
4318 		/* Send to upper protocol */
4319 		sco_recv_scodata(conn, skb);
4320 		return;
4321 	} else {
4322 		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4323 			   handle);
4324 	}
4325 
4326 	kfree_skb(skb);
4327 }
4328 
4329 static bool hci_req_is_complete(struct hci_dev *hdev)
4330 {
4331 	struct sk_buff *skb;
4332 
4333 	skb = skb_peek(&hdev->cmd_q);
4334 	if (!skb)
4335 		return true;
4336 
4337 	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4338 }
4339 
4340 static void hci_resend_last(struct hci_dev *hdev)
4341 {
4342 	struct hci_command_hdr *sent;
4343 	struct sk_buff *skb;
4344 	u16 opcode;
4345 
4346 	if (!hdev->sent_cmd)
4347 		return;
4348 
4349 	sent = (void *) hdev->sent_cmd->data;
4350 	opcode = __le16_to_cpu(sent->opcode);
4351 	if (opcode == HCI_OP_RESET)
4352 		return;
4353 
4354 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4355 	if (!skb)
4356 		return;
4357 
4358 	skb_queue_head(&hdev->cmd_q, skb);
4359 	queue_work(hdev->workqueue, &hdev->cmd_work);
4360 }
4361 
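/* Called from event processing once a command has been answered: match the
 * event against the last sent command, decide whether the current request is
 * finished and, if so, hand back the per-request (or per-skb) completion
 * callback for the caller to invoke.
 */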
4362 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4363 			  hci_req_complete_t *req_complete,
4364 			  hci_req_complete_skb_t *req_complete_skb)
4365 {
4366 	struct sk_buff *skb;
4367 	unsigned long flags;
4368 
4369 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4370 
4371 	/* If the completed command doesn't match the last one that was
4372 	 * sent we need to do special handling of it.
4373 	 */
4374 	if (!hci_sent_cmd_data(hdev, opcode)) {
4375 		/* Some CSR based controllers generate a spontaneous
4376 		 * reset complete event during init and any pending
4377 		 * command will never be completed. In such a case we
4378 		 * need to resend whatever was the last sent
4379 		 * command.
4380 		 */
4381 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4382 			hci_resend_last(hdev);
4383 
4384 		return;
4385 	}
4386 
4387 	/* If we reach this point this event matches the last command sent */
4388 	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4389 
4390 	/* If the command succeeded and there are still more commands in
4391 	 * this request, the request is not yet complete.
4392 	 */
4393 	if (!status && !hci_req_is_complete(hdev))
4394 		return;
4395 
4396 	/* If this was the last command in a request, the complete
4397 	 * callback would be found in hdev->sent_cmd instead of the
4398 	 * command queue (hdev->cmd_q).
4399 	 */
4400 	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4401 		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4402 		return;
4403 	}
4404 
4405 	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4406 		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4407 		return;
4408 	}
4409 
4410 	/* Remove all pending commands belonging to this request */
4411 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4412 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4413 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4414 			__skb_queue_head(&hdev->cmd_q, skb);
4415 			break;
4416 		}
4417 
4418 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4419 			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4420 		else
4421 			*req_complete = bt_cb(skb)->hci.req_complete;
4422 		kfree_skb(skb);
4423 	}
4424 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4425 }
4426 
4427 static void hci_rx_work(struct work_struct *work)
4428 {
4429 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4430 	struct sk_buff *skb;
4431 
4432 	BT_DBG("%s", hdev->name);
4433 
4434 	while ((skb = skb_dequeue(&hdev->rx_q))) {
4435 		/* Send copy to monitor */
4436 		hci_send_to_monitor(hdev, skb);
4437 
4438 		if (atomic_read(&hdev->promisc)) {
4439 			/* Send copy to the sockets */
4440 			hci_send_to_sock(hdev, skb);
4441 		}
4442 
4443 		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4444 			kfree_skb(skb);
4445 			continue;
4446 		}
4447 
4448 		if (test_bit(HCI_INIT, &hdev->flags)) {
4449 			/* Don't process data packets in this state. */
4450 			switch (hci_skb_pkt_type(skb)) {
4451 			case HCI_ACLDATA_PKT:
4452 			case HCI_SCODATA_PKT:
4453 				kfree_skb(skb);
4454 				continue;
4455 			}
4456 		}
4457 
4458 		/* Process frame */
4459 		switch (hci_skb_pkt_type(skb)) {
4460 		case HCI_EVENT_PKT:
4461 			BT_DBG("%s Event packet", hdev->name);
4462 			hci_event_packet(hdev, skb);
4463 			break;
4464 
4465 		case HCI_ACLDATA_PKT:
4466 			BT_DBG("%s ACL data packet", hdev->name);
4467 			hci_acldata_packet(hdev, skb);
4468 			break;
4469 
4470 		case HCI_SCODATA_PKT:
4471 			BT_DBG("%s SCO data packet", hdev->name);
4472 			hci_scodata_packet(hdev, skb);
4473 			break;
4474 
4475 		default:
4476 			kfree_skb(skb);
4477 			break;
4478 		}
4479 	}
4480 }
4481 
4482 static void hci_cmd_work(struct work_struct *work)
4483 {
4484 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4485 	struct sk_buff *skb;
4486 
4487 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4488 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4489 
4490 	/* Send queued commands */
4491 	if (atomic_read(&hdev->cmd_cnt)) {
4492 		skb = skb_dequeue(&hdev->cmd_q);
4493 		if (!skb)
4494 			return;
4495 
4496 		kfree_skb(hdev->sent_cmd);
4497 
4498 		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4499 		if (hdev->sent_cmd) {
4500 			if (hci_req_status_pend(hdev))
4501 				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4502 			atomic_dec(&hdev->cmd_cnt);
4503 			hci_send_frame(hdev, skb);
4504 			if (test_bit(HCI_RESET, &hdev->flags))
4505 				cancel_delayed_work(&hdev->cmd_timer);
4506 			else
4507 				schedule_delayed_work(&hdev->cmd_timer,
4508 						      HCI_CMD_TIMEOUT);
4509 		} else {
4510 			skb_queue_head(&hdev->cmd_q, skb);
4511 			queue_work(hdev->workqueue, &hdev->cmd_work);
4512 		}
4513 	}
4514 }
4515