1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
43 #include "leds.h"
44
45 static void hci_rx_work(struct work_struct *work);
46 static void hci_cmd_work(struct work_struct *work);
47 static void hci_tx_work(struct work_struct *work);
48
49 /* HCI device list */
50 LIST_HEAD(hci_dev_list);
51 DEFINE_RWLOCK(hci_dev_list_lock);
52
53 /* HCI callback list */
54 LIST_HEAD(hci_cb_list);
55 DEFINE_MUTEX(hci_cb_list_lock);
56
57 /* HCI ID Numbering */
58 static DEFINE_IDA(hci_index_ida);
59
60 /* ---- HCI debugfs entries ---- */
61
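/* The dut_mode debugfs attribute exposes Device Under Test mode, which
 * is used for RF qualification testing. Reads report the current state
 * as 'Y' or 'N'; writes toggle it on a running controller by sending
 * either HCI_OP_ENABLE_DUT_MODE or HCI_OP_RESET. Assuming debugfs is
 * mounted in the usual place, the file appears as e.g.
 * /sys/kernel/debug/bluetooth/hci0/dut_mode.
 */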
62 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
64 {
65 struct hci_dev *hdev = file->private_data;
66 char buf[3];
67
68 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
69 buf[1] = '\n';
70 buf[2] = '\0';
71 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
72 }
73
74 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
75 size_t count, loff_t *ppos)
76 {
77 struct hci_dev *hdev = file->private_data;
78 struct sk_buff *skb;
79 bool enable;
80 int err;
81
82 if (!test_bit(HCI_UP, &hdev->flags))
83 return -ENETDOWN;
84
85 err = kstrtobool_from_user(user_buf, count, &enable);
86 if (err)
87 return err;
88
89 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
90 return -EALREADY;
91
92 hci_req_sync_lock(hdev);
93 if (enable)
94 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
95 HCI_CMD_TIMEOUT);
96 else
97 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
98 HCI_CMD_TIMEOUT);
99 hci_req_sync_unlock(hdev);
100
101 if (IS_ERR(skb))
102 return PTR_ERR(skb);
103
104 kfree_skb(skb);
105
106 hci_dev_change_flag(hdev, HCI_DUT_MODE);
107
108 return count;
109 }
110
111 static const struct file_operations dut_mode_fops = {
112 .open = simple_open,
113 .read = dut_mode_read,
114 .write = dut_mode_write,
115 .llseek = default_llseek,
116 };
117
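/* The vendor_diag debugfs attribute switches vendor specific diagnostic
 * messages on or off through the driver's set_diag callback. The entry
 * is only created below when the driver actually provides set_diag.
 */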
118 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
119 size_t count, loff_t *ppos)
120 {
121 struct hci_dev *hdev = file->private_data;
122 char buf[3];
123
124 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
125 buf[1] = '\n';
126 buf[2] = '\0';
127 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
128 }
129
130 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
131 size_t count, loff_t *ppos)
132 {
133 struct hci_dev *hdev = file->private_data;
134 bool enable;
135 int err;
136
137 err = kstrtobool_from_user(user_buf, count, &enable);
138 if (err)
139 return err;
140
141 /* When the diagnostic flags are not persistent and the transport
142 * is not active or in user channel operation, then there is no need
143 * for the vendor callback. Instead just store the desired value and
144 * the setting will be programmed when the controller gets powered on.
145 */
146 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
147 (!test_bit(HCI_RUNNING, &hdev->flags) ||
148 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
149 goto done;
150
151 hci_req_sync_lock(hdev);
152 err = hdev->set_diag(hdev, enable);
153 hci_req_sync_unlock(hdev);
154
155 if (err < 0)
156 return err;
157
158 done:
159 if (enable)
160 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
161 else
162 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
163
164 return count;
165 }
166
167 static const struct file_operations vendor_diag_fops = {
168 .open = simple_open,
169 .read = vendor_diag_read,
170 .write = vendor_diag_write,
171 .llseek = default_llseek,
172 };
173
174 static void hci_debugfs_create_basic(struct hci_dev *hdev)
175 {
176 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
177 &dut_mode_fops);
178
179 if (hdev->set_diag)
180 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
181 &vendor_diag_fops);
182 }
183
184 static int hci_reset_req(struct hci_request *req, unsigned long opt)
185 {
186 BT_DBG("%s %ld", req->hdev->name, opt);
187
188 /* Reset device */
189 set_bit(HCI_RESET, &req->hdev->flags);
190 hci_req_add(req, HCI_OP_RESET, 0, NULL);
191 return 0;
192 }
193
194 static void bredr_init(struct hci_request *req)
195 {
196 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
197
198 /* Read Local Supported Features */
199 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
200
201 /* Read Local Version */
202 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
203
204 /* Read BD Address */
205 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
206 }
207
208 static void amp_init1(struct hci_request *req)
209 {
210 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
211
212 /* Read Local Version */
213 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
214
215 /* Read Local Supported Commands */
216 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
217
218 /* Read Local AMP Info */
219 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
220
221 /* Read Data Blk size */
222 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
223
224 /* Read Flow Control Mode */
225 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
226
227 /* Read Location Data */
228 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
229 }
230
231 static int amp_init2(struct hci_request *req)
232 {
233 /* Read Local Supported Features. Not all AMP controllers
234 * support this so it's placed conditionally in the second
235 * stage init.
236 */
237 if (req->hdev->commands[14] & 0x20)
238 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
239
240 return 0;
241 }
242
243 static int hci_init1_req(struct hci_request *req, unsigned long opt)
244 {
245 struct hci_dev *hdev = req->hdev;
246
247 BT_DBG("%s %ld", hdev->name, opt);
248
249 /* Reset */
250 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
251 hci_reset_req(req, 0);
252
253 switch (hdev->dev_type) {
254 case HCI_PRIMARY:
255 bredr_init(req);
256 break;
257 case HCI_AMP:
258 amp_init1(req);
259 break;
260 default:
261 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
262 break;
263 }
264
265 return 0;
266 }
267
268 static void bredr_setup(struct hci_request *req)
269 {
270 __le16 param;
271 __u8 flt_type;
272
273 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
274 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
275
276 /* Read Class of Device */
277 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
278
279 /* Read Local Name */
280 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
281
282 /* Read Voice Setting */
283 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
284
285 /* Read Number of Supported IAC */
286 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
287
288 /* Read Current IAC LAP */
289 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
290
291 /* Clear Event Filters */
292 flt_type = HCI_FLT_CLEAR_ALL;
293 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
294
295 /* Connection accept timeout ~20 secs */
296 param = cpu_to_le16(0x7d00);
297 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
298 }
299
300 static void le_setup(struct hci_request *req)
301 {
302 struct hci_dev *hdev = req->hdev;
303
304 /* Read LE Buffer Size */
305 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
306
307 /* Read LE Local Supported Features */
308 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
309
310 /* Read LE Supported States */
311 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
312
313 /* LE-only controllers have LE implicitly enabled */
314 if (!lmp_bredr_capable(hdev))
315 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
316 }
317
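/* Build the mask for the Set Event Mask command. Each byte of events[]
 * covers eight HCI events as laid out in the Bluetooth Core
 * specification; bits are only set for events the controller can
 * actually generate according to its LMP features.
 */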
318 static void hci_setup_event_mask(struct hci_request *req)
319 {
320 struct hci_dev *hdev = req->hdev;
321
322 /* The second byte is 0xff instead of 0x9f (two reserved bits
323 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
324 * command otherwise.
325 */
326 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
327
328 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
329 * any event mask for pre-1.2 devices.
330 */
331 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
332 return;
333
334 if (lmp_bredr_capable(hdev)) {
335 events[4] |= 0x01; /* Flow Specification Complete */
336 } else {
337 /* Use a different default for LE-only devices */
338 memset(events, 0, sizeof(events));
339 events[1] |= 0x20; /* Command Complete */
340 events[1] |= 0x40; /* Command Status */
341 events[1] |= 0x80; /* Hardware Error */
342
343 /* If the controller supports the Disconnect command, enable
344 * the corresponding event. In addition enable packet flow
345 * control related events.
346 */
347 if (hdev->commands[0] & 0x20) {
348 events[0] |= 0x10; /* Disconnection Complete */
349 events[2] |= 0x04; /* Number of Completed Packets */
350 events[3] |= 0x02; /* Data Buffer Overflow */
351 }
352
353 /* If the controller supports the Read Remote Version
354 * Information command, enable the corresponding event.
355 */
356 if (hdev->commands[2] & 0x80)
357 events[1] |= 0x08; /* Read Remote Version Information
358 * Complete
359 */
360
361 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
362 events[0] |= 0x80; /* Encryption Change */
363 events[5] |= 0x80; /* Encryption Key Refresh Complete */
364 }
365 }
366
367 if (lmp_inq_rssi_capable(hdev) ||
368 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
369 events[4] |= 0x02; /* Inquiry Result with RSSI */
370
371 if (lmp_ext_feat_capable(hdev))
372 events[4] |= 0x04; /* Read Remote Extended Features Complete */
373
374 if (lmp_esco_capable(hdev)) {
375 events[5] |= 0x08; /* Synchronous Connection Complete */
376 events[5] |= 0x10; /* Synchronous Connection Changed */
377 }
378
379 if (lmp_sniffsubr_capable(hdev))
380 events[5] |= 0x20; /* Sniff Subrating */
381
382 if (lmp_pause_enc_capable(hdev))
383 events[5] |= 0x80; /* Encryption Key Refresh Complete */
384
385 if (lmp_ext_inq_capable(hdev))
386 events[5] |= 0x40; /* Extended Inquiry Result */
387
388 if (lmp_no_flush_capable(hdev))
389 events[7] |= 0x01; /* Enhanced Flush Complete */
390
391 if (lmp_lsto_capable(hdev))
392 events[6] |= 0x80; /* Link Supervision Timeout Changed */
393
394 if (lmp_ssp_capable(hdev)) {
395 events[6] |= 0x01; /* IO Capability Request */
396 events[6] |= 0x02; /* IO Capability Response */
397 events[6] |= 0x04; /* User Confirmation Request */
398 events[6] |= 0x08; /* User Passkey Request */
399 events[6] |= 0x10; /* Remote OOB Data Request */
400 events[6] |= 0x20; /* Simple Pairing Complete */
401 events[7] |= 0x04; /* User Passkey Notification */
402 events[7] |= 0x08; /* Keypress Notification */
403 events[7] |= 0x10; /* Remote Host Supported
404 * Features Notification
405 */
406 }
407
408 if (lmp_le_capable(hdev))
409 events[7] |= 0x20; /* LE Meta-Event */
410
411 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
412 }
413
414 static int hci_init2_req(struct hci_request *req, unsigned long opt)
415 {
416 struct hci_dev *hdev = req->hdev;
417
418 if (hdev->dev_type == HCI_AMP)
419 return amp_init2(req);
420
421 if (lmp_bredr_capable(hdev))
422 bredr_setup(req);
423 else
424 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
425
426 if (lmp_le_capable(hdev))
427 le_setup(req);
428
429 /* All Bluetooth 1.2 and later controllers should support the
430 * HCI command for reading the local supported commands.
431 *
432 * Unfortunately some controllers indicate Bluetooth 1.2 support,
433 * but do not have support for this command. If that is the case,
434 * the driver can quirk the behavior and skip reading the local
435 * supported commands.
436 */
437 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
438 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
439 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
440
441 if (lmp_ssp_capable(hdev)) {
442 /* When SSP is available, the host features page
443 * should be available as well. However, some
444 * controllers list the max_page as 0 as long as SSP
445 * has not been enabled. To achieve proper debugging
446 * output, force max_page to a minimum of 1.
447 */
448 hdev->max_page = 0x01;
449
450 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
451 u8 mode = 0x01;
452
453 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
454 sizeof(mode), &mode);
455 } else {
456 struct hci_cp_write_eir cp;
457
458 memset(hdev->eir, 0, sizeof(hdev->eir));
459 memset(&cp, 0, sizeof(cp));
460
461 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
462 }
463 }
464
465 if (lmp_inq_rssi_capable(hdev) ||
466 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
467 u8 mode;
468
469 /* If Extended Inquiry Result events are supported, then
470 * they are clearly preferred over Inquiry Result with RSSI
471 * events.
472 */
473 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
474
475 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
476 }
477
478 if (lmp_inq_tx_pwr_capable(hdev))
479 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
480
481 if (lmp_ext_feat_capable(hdev)) {
482 struct hci_cp_read_local_ext_features cp;
483
484 cp.page = 0x01;
485 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
486 sizeof(cp), &cp);
487 }
488
489 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
490 u8 enable = 1;
491 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
492 &enable);
493 }
494
495 return 0;
496 }
497
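/* Assemble the default link policy (role switch, hold, sniff and park)
 * from the controller's LMP features and program it with the Write
 * Default Link Policy Settings command.
 */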
498 static void hci_setup_link_policy(struct hci_request *req)
499 {
500 struct hci_dev *hdev = req->hdev;
501 struct hci_cp_write_def_link_policy cp;
502 u16 link_policy = 0;
503
504 if (lmp_rswitch_capable(hdev))
505 link_policy |= HCI_LP_RSWITCH;
506 if (lmp_hold_capable(hdev))
507 link_policy |= HCI_LP_HOLD;
508 if (lmp_sniff_capable(hdev))
509 link_policy |= HCI_LP_SNIFF;
510 if (lmp_park_capable(hdev))
511 link_policy |= HCI_LP_PARK;
512
513 cp.policy = cpu_to_le16(link_policy);
514 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
515 }
516
517 static void hci_set_le_support(struct hci_request *req)
518 {
519 struct hci_dev *hdev = req->hdev;
520 struct hci_cp_write_le_host_supported cp;
521
522 /* LE-only devices do not support explicit enablement */
523 if (!lmp_bredr_capable(hdev))
524 return;
525
526 memset(&cp, 0, sizeof(cp));
527
528 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
529 cp.le = 0x01;
530 cp.simul = 0x00;
531 }
532
533 if (cp.le != lmp_host_le_capable(hdev))
534 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
535 &cp);
536 }
537
538 static void hci_set_event_mask_page_2(struct hci_request *req)
539 {
540 struct hci_dev *hdev = req->hdev;
541 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
542 bool changed = false;
543
544 /* If Connectionless Slave Broadcast master role is supported,
545 * enable all necessary events for it.
546 */
547 if (lmp_csb_master_capable(hdev)) {
548 events[1] |= 0x40; /* Triggered Clock Capture */
549 events[1] |= 0x80; /* Synchronization Train Complete */
550 events[2] |= 0x10; /* Slave Page Response Timeout */
551 events[2] |= 0x20; /* CSB Channel Map Change */
552 changed = true;
553 }
554
555 /* If Connectionless Slave Broadcast slave role is supported,
556 * enable all necessary events for it.
557 */
558 if (lmp_csb_slave_capable(hdev)) {
559 events[2] |= 0x01; /* Synchronization Train Received */
560 events[2] |= 0x02; /* CSB Receive */
561 events[2] |= 0x04; /* CSB Timeout */
562 events[2] |= 0x08; /* Truncated Page Complete */
563 changed = true;
564 }
565
566 /* Enable Authenticated Payload Timeout Expired event if supported */
567 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
568 events[2] |= 0x80;
569 changed = true;
570 }
571
572 /* Some Broadcom based controllers indicate support for Set Event
573 * Mask Page 2 command, but then actually do not support it. Since
574 * the default value is all bits set to zero, the command is only
575 * required if the event mask has to be changed. In case no change
576 * to the event mask is needed, skip this command.
577 */
578 if (changed)
579 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
580 sizeof(events), events);
581 }
582
583 static int hci_init3_req(struct hci_request *req, unsigned long opt)
584 {
585 struct hci_dev *hdev = req->hdev;
586 u8 p;
587
588 hci_setup_event_mask(req);
589
590 if (hdev->commands[6] & 0x20 &&
591 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
592 struct hci_cp_read_stored_link_key cp;
593
594 bacpy(&cp.bdaddr, BDADDR_ANY);
595 cp.read_all = 0x01;
596 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
597 }
598
599 if (hdev->commands[5] & 0x10)
600 hci_setup_link_policy(req);
601
602 if (hdev->commands[8] & 0x01)
603 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
604
605 /* Some older Broadcom based Bluetooth 1.2 controllers do not
606 * support the Read Page Scan Type command. Check support for
607 * this command in the bit mask of supported commands.
608 */
609 if (hdev->commands[13] & 0x01)
610 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
611
612 if (lmp_le_capable(hdev)) {
613 u8 events[8];
614
615 memset(events, 0, sizeof(events));
616
617 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
618 events[0] |= 0x10; /* LE Long Term Key Request */
619
620 /* If controller supports the Connection Parameters Request
621 * Link Layer Procedure, enable the corresponding event.
622 */
623 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
624 events[0] |= 0x20; /* LE Remote Connection
625 * Parameter Request
626 */
627
628 /* If the controller supports the Data Length Extension
629 * feature, enable the corresponding event.
630 */
631 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
632 events[0] |= 0x40; /* LE Data Length Change */
633
634 /* If the controller supports Extended Scanner Filter
635 * Policies, enable the corresponding event.
636 */
637 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
638 events[1] |= 0x04; /* LE Direct Advertising
639 * Report
640 */
641
642 /* If the controller supports Channel Selection Algorithm #2
643 * feature, enable the corresponding event.
644 */
645 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
646 events[2] |= 0x08; /* LE Channel Selection
647 * Algorithm
648 */
649
650 /* If the controller supports the LE Set Scan Enable command,
651 * enable the corresponding advertising report event.
652 */
653 if (hdev->commands[26] & 0x08)
654 events[0] |= 0x02; /* LE Advertising Report */
655
656 /* If the controller supports the LE Create Connection
657 * command, enable the corresponding event.
658 */
659 if (hdev->commands[26] & 0x10)
660 events[0] |= 0x01; /* LE Connection Complete */
661
662 /* If the controller supports the LE Connection Update
663 * command, enable the corresponding event.
664 */
665 if (hdev->commands[27] & 0x04)
666 events[0] |= 0x04; /* LE Connection Update
667 * Complete
668 */
669
670 /* If the controller supports the LE Read Remote Used Features
671 * command, enable the corresponding event.
672 */
673 if (hdev->commands[27] & 0x20)
674 events[0] |= 0x08; /* LE Read Remote Used
675 * Features Complete
676 */
677
678 /* If the controller supports the LE Read Local P-256
679 * Public Key command, enable the corresponding event.
680 */
681 if (hdev->commands[34] & 0x02)
682 events[0] |= 0x80; /* LE Read Local P-256
683 * Public Key Complete
684 */
685
686 /* If the controller supports the LE Generate DHKey
687 * command, enable the corresponding event.
688 */
689 if (hdev->commands[34] & 0x04)
690 events[1] |= 0x01; /* LE Generate DHKey Complete */
691
692 /* If the controller supports the LE Set Default PHY or
693 * LE Set PHY commands, enable the corresponding event.
694 */
695 if (hdev->commands[35] & (0x20 | 0x40))
696 events[1] |= 0x08; /* LE PHY Update Complete */
697
698 /* If the controller supports LE Set Extended Scan Parameters
699 * and LE Set Extended Scan Enable commands, enable the
700 * corresponding event.
701 */
702 if (use_ext_scan(hdev))
703 events[1] |= 0x10; /* LE Extended Advertising
704 * Report
705 */
706
707 /* If the controller supports the LE Extended Create Connection
708 * command, enable the corresponding event.
709 */
710 if (use_ext_conn(hdev))
711 events[1] |= 0x02; /* LE Enhanced Connection
712 * Complete
713 */
714
715 /* If the controller supports the LE Extended Advertising
716 * command, enable the corresponding event.
717 */
718 if (ext_adv_capable(hdev))
719 events[2] |= 0x02; /* LE Advertising Set
720 * Terminated
721 */
722
723 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
724 events);
725
726 /* Read LE Advertising Channel TX Power */
727 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
728 /* The HCI TS spec forbids mixing legacy and extended
729 * advertising commands, and READ_ADV_TX_POWER is part
730 * of the legacy set. So do not call it if extended adv
731 * is supported, otherwise the controller will return
732 * COMMAND_DISALLOWED for extended commands.
733 */
734 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
735 }
736
737 if (hdev->commands[26] & 0x40) {
738 /* Read LE White List Size */
739 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
740 0, NULL);
741 }
742
743 if (hdev->commands[26] & 0x80) {
744 /* Clear LE White List */
745 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
746 }
747
748 if (hdev->commands[34] & 0x40) {
749 /* Read LE Resolving List Size */
750 hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
751 0, NULL);
752 }
753
754 if (hdev->commands[34] & 0x20) {
755 /* Clear LE Resolving List */
756 hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
757 }
758
759 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
760 /* Read LE Maximum Data Length */
761 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
762
763 /* Read LE Suggested Default Data Length */
764 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
765 }
766
767 if (ext_adv_capable(hdev)) {
768 /* Read LE Number of Supported Advertising Sets */
769 hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
770 0, NULL);
771 }
772
773 hci_set_le_support(req);
774 }
775
776 /* Read features beyond page 1 if available */
777 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
778 struct hci_cp_read_local_ext_features cp;
779
780 cp.page = p;
781 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
782 sizeof(cp), &cp);
783 }
784
785 return 0;
786 }
787
788 static int hci_init4_req(struct hci_request *req, unsigned long opt)
789 {
790 struct hci_dev *hdev = req->hdev;
791
792 /* Some Broadcom based Bluetooth controllers do not support the
793 * Delete Stored Link Key command. They are clearly indicating its
794 * absence in the bit mask of supported commands.
795 *
796 * Check the supported commands and only if the command is marked
797 * as supported send it. If not supported assume that the controller
798 * does not have actual support for stored link keys which makes this
799 * command redundant anyway.
800 *
801 * Some controllers indicate that they support handling deleting
802 * stored link keys, but they don't. The quirk lets a driver
803 * just disable this command.
804 */
805 if (hdev->commands[6] & 0x80 &&
806 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
807 struct hci_cp_delete_stored_link_key cp;
808
809 bacpy(&cp.bdaddr, BDADDR_ANY);
810 cp.delete_all = 0x01;
811 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
812 sizeof(cp), &cp);
813 }
814
815 /* Set event mask page 2 if the HCI command for it is supported */
816 if (hdev->commands[22] & 0x04)
817 hci_set_event_mask_page_2(req);
818
819 /* Read local codec list if the HCI command is supported */
820 if (hdev->commands[29] & 0x20)
821 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
822
823 /* Get MWS transport configuration if the HCI command is supported */
824 if (hdev->commands[30] & 0x08)
825 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
826
827 /* Check for Synchronization Train support */
828 if (lmp_sync_train_capable(hdev))
829 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
830
831 /* Enable Secure Connections if supported and configured */
832 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
833 bredr_sc_enabled(hdev)) {
834 u8 support = 0x01;
835
836 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
837 sizeof(support), &support);
838 }
839
840 /* Set Suggested Default Data Length to maximum if supported */
841 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
842 struct hci_cp_le_write_def_data_len cp;
843
844 cp.tx_len = hdev->le_max_tx_len;
845 cp.tx_time = hdev->le_max_tx_time;
846 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
847 }
848
849 /* Set Default PHY parameters if command is supported */
850 if (hdev->commands[35] & 0x20) {
851 struct hci_cp_le_set_default_phy cp;
852
853 cp.all_phys = 0x00;
854 cp.tx_phys = hdev->le_tx_def_phys;
855 cp.rx_phys = hdev->le_rx_def_phys;
856
857 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
858 }
859
860 return 0;
861 }
862
863 static int __hci_init(struct hci_dev *hdev)
864 {
865 int err;
866
867 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
868 if (err < 0)
869 return err;
870
871 if (hci_dev_test_flag(hdev, HCI_SETUP))
872 hci_debugfs_create_basic(hdev);
873
874 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
875 if (err < 0)
876 return err;
877
878 /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
879 * dual-mode BR/EDR/LE controllers. AMP controllers only need the
880 * first two stages of init.
881 */
882 if (hdev->dev_type != HCI_PRIMARY)
883 return 0;
884
885 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
886 if (err < 0)
887 return err;
888
889 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
890 if (err < 0)
891 return err;
892
893 /* This function is only called when the controller is actually in
894 * configured state. When the controller is marked as unconfigured,
895 * this initialization procedure is not run.
896 *
897 * It means that it is possible that a controller runs through its
898 * setup phase and then discovers missing settings. If that is the
899 * case, then this function will not be called. It then will only
900 * be called during the config phase.
901 *
902 * So only when in setup phase or config phase, create the debugfs
903 * entries and register the SMP channels.
904 */
905 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
906 !hci_dev_test_flag(hdev, HCI_CONFIG))
907 return 0;
908
909 hci_debugfs_create_common(hdev);
910
911 if (lmp_bredr_capable(hdev))
912 hci_debugfs_create_bredr(hdev);
913
914 if (lmp_le_capable(hdev))
915 hci_debugfs_create_le(hdev);
916
917 return 0;
918 }
919
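/* Minimal init stage used for unconfigured controllers: reset the
 * controller, read the local version information and, if the driver
 * can change the public address, also read the BD address.
 */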
920 static int hci_init0_req(struct hci_request *req, unsigned long opt)
921 {
922 struct hci_dev *hdev = req->hdev;
923
924 BT_DBG("%s %ld", hdev->name, opt);
925
926 /* Reset */
927 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
928 hci_reset_req(req, 0);
929
930 /* Read Local Version */
931 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
932
933 /* Read BD Address */
934 if (hdev->set_bdaddr)
935 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
936
937 return 0;
938 }
939
940 static int __hci_unconf_init(struct hci_dev *hdev)
941 {
942 int err;
943
944 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
945 return 0;
946
947 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
948 if (err < 0)
949 return err;
950
951 if (hci_dev_test_flag(hdev, HCI_SETUP))
952 hci_debugfs_create_basic(hdev);
953
954 return 0;
955 }
956
957 static int hci_scan_req(struct hci_request *req, unsigned long opt)
958 {
959 __u8 scan = opt;
960
961 BT_DBG("%s %x", req->hdev->name, scan);
962
963 /* Inquiry and Page scans */
964 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
965 return 0;
966 }
967
968 static int hci_auth_req(struct hci_request *req, unsigned long opt)
969 {
970 __u8 auth = opt;
971
972 BT_DBG("%s %x", req->hdev->name, auth);
973
974 /* Authentication */
975 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
976 return 0;
977 }
978
979 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
980 {
981 __u8 encrypt = opt;
982
983 BT_DBG("%s %x", req->hdev->name, encrypt);
984
985 /* Encryption */
986 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
987 return 0;
988 }
989
990 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
991 {
992 __le16 policy = cpu_to_le16(opt);
993
994 BT_DBG("%s %x", req->hdev->name, policy);
995
996 /* Default link policy */
997 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
998 return 0;
999 }
1000
1001 /* Get HCI device by index.
1002 * Device is held on return. */
1003 struct hci_dev *hci_dev_get(int index)
1004 {
1005 struct hci_dev *hdev = NULL, *d;
1006
1007 BT_DBG("%d", index);
1008
1009 if (index < 0)
1010 return NULL;
1011
1012 read_lock(&hci_dev_list_lock);
1013 list_for_each_entry(d, &hci_dev_list, list) {
1014 if (d->id == index) {
1015 hdev = hci_dev_hold(d);
1016 break;
1017 }
1018 }
1019 read_unlock(&hci_dev_list_lock);
1020 return hdev;
1021 }
1022
1023 /* ---- Inquiry support ---- */
1024
1025 bool hci_discovery_active(struct hci_dev *hdev)
1026 {
1027 struct discovery_state *discov = &hdev->discovery;
1028
1029 switch (discov->state) {
1030 case DISCOVERY_FINDING:
1031 case DISCOVERY_RESOLVING:
1032 return true;
1033
1034 default:
1035 return false;
1036 }
1037 }
1038
1039 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1040 {
1041 int old_state = hdev->discovery.state;
1042
1043 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1044
1045 if (old_state == state)
1046 return;
1047
1048 hdev->discovery.state = state;
1049
1050 switch (state) {
1051 case DISCOVERY_STOPPED:
1052 hci_update_background_scan(hdev);
1053
1054 if (old_state != DISCOVERY_STARTING)
1055 mgmt_discovering(hdev, 0);
1056 break;
1057 case DISCOVERY_STARTING:
1058 break;
1059 case DISCOVERY_FINDING:
1060 mgmt_discovering(hdev, 1);
1061 break;
1062 case DISCOVERY_RESOLVING:
1063 break;
1064 case DISCOVERY_STOPPING:
1065 break;
1066 }
1067 }
1068
1069 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1070 {
1071 struct discovery_state *cache = &hdev->discovery;
1072 struct inquiry_entry *p, *n;
1073
1074 list_for_each_entry_safe(p, n, &cache->all, all) {
1075 list_del(&p->all);
1076 kfree(p);
1077 }
1078
1079 INIT_LIST_HEAD(&cache->unknown);
1080 INIT_LIST_HEAD(&cache->resolve);
1081 }
1082
1083 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1084 bdaddr_t *bdaddr)
1085 {
1086 struct discovery_state *cache = &hdev->discovery;
1087 struct inquiry_entry *e;
1088
1089 BT_DBG("cache %p, %pMR", cache, bdaddr);
1090
1091 list_for_each_entry(e, &cache->all, all) {
1092 if (!bacmp(&e->data.bdaddr, bdaddr))
1093 return e;
1094 }
1095
1096 return NULL;
1097 }
1098
1099 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1100 bdaddr_t *bdaddr)
1101 {
1102 struct discovery_state *cache = &hdev->discovery;
1103 struct inquiry_entry *e;
1104
1105 BT_DBG("cache %p, %pMR", cache, bdaddr);
1106
1107 list_for_each_entry(e, &cache->unknown, list) {
1108 if (!bacmp(&e->data.bdaddr, bdaddr))
1109 return e;
1110 }
1111
1112 return NULL;
1113 }
1114
1115 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1116 bdaddr_t *bdaddr,
1117 int state)
1118 {
1119 struct discovery_state *cache = &hdev->discovery;
1120 struct inquiry_entry *e;
1121
1122 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1123
1124 list_for_each_entry(e, &cache->resolve, list) {
1125 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1126 return e;
1127 if (!bacmp(&e->data.bdaddr, bdaddr))
1128 return e;
1129 }
1130
1131 return NULL;
1132 }
1133
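/* Move the entry to its place in the resolve list, which is kept
 * ordered by RSSI so that name resolution is attempted for the
 * strongest (closest) devices first.
 */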
1134 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1135 struct inquiry_entry *ie)
1136 {
1137 struct discovery_state *cache = &hdev->discovery;
1138 struct list_head *pos = &cache->resolve;
1139 struct inquiry_entry *p;
1140
1141 list_del(&ie->list);
1142
1143 list_for_each_entry(p, &cache->resolve, list) {
1144 if (p->name_state != NAME_PENDING &&
1145 abs(p->data.rssi) >= abs(ie->data.rssi))
1146 break;
1147 pos = &p->list;
1148 }
1149
1150 list_add(&ie->list, pos);
1151 }
1152
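/* Add a discovered device to the inquiry cache, or refresh its entry
 * if it is already known, and return the MGMT device-found flags
 * (legacy pairing, confirm name) that apply to it.
 */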
1153 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1154 bool name_known)
1155 {
1156 struct discovery_state *cache = &hdev->discovery;
1157 struct inquiry_entry *ie;
1158 u32 flags = 0;
1159
1160 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1161
1162 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1163
1164 if (!data->ssp_mode)
1165 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1166
1167 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1168 if (ie) {
1169 if (!ie->data.ssp_mode)
1170 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1171
1172 if (ie->name_state == NAME_NEEDED &&
1173 data->rssi != ie->data.rssi) {
1174 ie->data.rssi = data->rssi;
1175 hci_inquiry_cache_update_resolve(hdev, ie);
1176 }
1177
1178 goto update;
1179 }
1180
1181 /* Entry not in the cache. Add new one. */
1182 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1183 if (!ie) {
1184 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1185 goto done;
1186 }
1187
1188 list_add(&ie->all, &cache->all);
1189
1190 if (name_known) {
1191 ie->name_state = NAME_KNOWN;
1192 } else {
1193 ie->name_state = NAME_NOT_KNOWN;
1194 list_add(&ie->list, &cache->unknown);
1195 }
1196
1197 update:
1198 if (name_known && ie->name_state != NAME_KNOWN &&
1199 ie->name_state != NAME_PENDING) {
1200 ie->name_state = NAME_KNOWN;
1201 list_del(&ie->list);
1202 }
1203
1204 memcpy(&ie->data, data, sizeof(*data));
1205 ie->timestamp = jiffies;
1206 cache->timestamp = jiffies;
1207
1208 if (ie->name_state == NAME_NOT_KNOWN)
1209 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1210
1211 done:
1212 return flags;
1213 }
1214
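/* Copy up to num inquiry cache entries into buf as struct inquiry_info
 * records and return the number copied. Called with hdev->lock held,
 * so it must not sleep.
 */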
1215 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1216 {
1217 struct discovery_state *cache = &hdev->discovery;
1218 struct inquiry_info *info = (struct inquiry_info *) buf;
1219 struct inquiry_entry *e;
1220 int copied = 0;
1221
1222 list_for_each_entry(e, &cache->all, all) {
1223 struct inquiry_data *data = &e->data;
1224
1225 if (copied >= num)
1226 break;
1227
1228 bacpy(&info->bdaddr, &data->bdaddr);
1229 info->pscan_rep_mode = data->pscan_rep_mode;
1230 info->pscan_period_mode = data->pscan_period_mode;
1231 info->pscan_mode = data->pscan_mode;
1232 memcpy(info->dev_class, data->dev_class, 3);
1233 info->clock_offset = data->clock_offset;
1234
1235 info++;
1236 copied++;
1237 }
1238
1239 BT_DBG("cache %p, copied %d", cache, copied);
1240 return copied;
1241 }
1242
1243 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1244 {
1245 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1246 struct hci_dev *hdev = req->hdev;
1247 struct hci_cp_inquiry cp;
1248
1249 BT_DBG("%s", hdev->name);
1250
1251 if (test_bit(HCI_INQUIRY, &hdev->flags))
1252 return 0;
1253
1254 /* Start Inquiry */
1255 memcpy(&cp.lap, &ir->lap, 3);
1256 cp.length = ir->length;
1257 cp.num_rsp = ir->num_rsp;
1258 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1259
1260 return 0;
1261 }
1262
1263 int hci_inquiry(void __user *arg)
1264 {
1265 __u8 __user *ptr = arg;
1266 struct hci_inquiry_req ir;
1267 struct hci_dev *hdev;
1268 int err = 0, do_inquiry = 0, max_rsp;
1269 long timeo;
1270 __u8 *buf;
1271
1272 if (copy_from_user(&ir, ptr, sizeof(ir)))
1273 return -EFAULT;
1274
1275 hdev = hci_dev_get(ir.dev_id);
1276 if (!hdev)
1277 return -ENODEV;
1278
1279 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1280 err = -EBUSY;
1281 goto done;
1282 }
1283
1284 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1285 err = -EOPNOTSUPP;
1286 goto done;
1287 }
1288
1289 if (hdev->dev_type != HCI_PRIMARY) {
1290 err = -EOPNOTSUPP;
1291 goto done;
1292 }
1293
1294 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1295 err = -EOPNOTSUPP;
1296 goto done;
1297 }
1298
1299 hci_dev_lock(hdev);
1300 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1301 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1302 hci_inquiry_cache_flush(hdev);
1303 do_inquiry = 1;
1304 }
1305 hci_dev_unlock(hdev);
1306
1307 timeo = ir.length * msecs_to_jiffies(2000);
1308
1309 if (do_inquiry) {
1310 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1311 timeo, NULL);
1312 if (err < 0)
1313 goto done;
1314
1315 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1316 * cleared). If it is interrupted by a signal, return -EINTR.
1317 */
1318 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1319 TASK_INTERRUPTIBLE))
1320 return -EINTR;
1321 }
1322
1323 /* For an unlimited number of responses we will use a buffer with
1324 * 255 entries
1325 */
1326 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1327
1328 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
1329 * and then copy it to user space.
1330 */
1331 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1332 if (!buf) {
1333 err = -ENOMEM;
1334 goto done;
1335 }
1336
1337 hci_dev_lock(hdev);
1338 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1339 hci_dev_unlock(hdev);
1340
1341 BT_DBG("num_rsp %d", ir.num_rsp);
1342
1343 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1344 ptr += sizeof(ir);
1345 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1346 ir.num_rsp))
1347 err = -EFAULT;
1348 } else
1349 err = -EFAULT;
1350
1351 kfree(buf);
1352
1353 done:
1354 hci_dev_put(hdev);
1355 return err;
1356 }
1357
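/* Power on the controller: run the driver setup stage when needed,
 * perform the synchronous HCI init sequence and, on success, mark the
 * device as UP and send the HCI_DEV_UP event. On failure all queues
 * and pending work are cleaned up again.
 */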
1358 static int hci_dev_do_open(struct hci_dev *hdev)
1359 {
1360 int ret = 0;
1361
1362 BT_DBG("%s %p", hdev->name, hdev);
1363
1364 hci_req_sync_lock(hdev);
1365
1366 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1367 ret = -ENODEV;
1368 goto done;
1369 }
1370
1371 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1372 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1373 /* Check for rfkill but allow the HCI setup stage to
1374 * proceed (which in itself doesn't cause any RF activity).
1375 */
1376 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1377 ret = -ERFKILL;
1378 goto done;
1379 }
1380
1381 /* Check for valid public address or a configured static
1382 * random address, but let the HCI setup proceed to
1383 * be able to determine if there is a public address
1384 * or not.
1385 *
1386 * In case of user channel usage, it is not important
1387 * if a public address or static random address is
1388 * available.
1389 *
1390 * This check is only valid for BR/EDR controllers
1391 * since AMP controllers do not have an address.
1392 */
1393 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1394 hdev->dev_type == HCI_PRIMARY &&
1395 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1396 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1397 ret = -EADDRNOTAVAIL;
1398 goto done;
1399 }
1400 }
1401
1402 if (test_bit(HCI_UP, &hdev->flags)) {
1403 ret = -EALREADY;
1404 goto done;
1405 }
1406
1407 if (hdev->open(hdev)) {
1408 ret = -EIO;
1409 goto done;
1410 }
1411
1412 set_bit(HCI_RUNNING, &hdev->flags);
1413 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1414
1415 atomic_set(&hdev->cmd_cnt, 1);
1416 set_bit(HCI_INIT, &hdev->flags);
1417
1418 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1419 test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1420 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1421
1422 if (hdev->setup)
1423 ret = hdev->setup(hdev);
1424
1425 /* The transport driver can set these quirks before
1426 * creating the HCI device or in its setup callback.
1427 *
1428 * In case any of them is set, the controller has to
1429 * start up as unconfigured.
1430 */
1431 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1432 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1433 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1434
1435 /* For an unconfigured controller it is required to
1436 * read at least the version information provided by
1437 * the Read Local Version Information command.
1438 *
1439 * If the set_bdaddr driver callback is provided, then
1440 * also the original Bluetooth public device address
1441 * will be read using the Read BD Address command.
1442 */
1443 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1444 ret = __hci_unconf_init(hdev);
1445 }
1446
1447 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1448 /* If public address change is configured, ensure that
1449 * the address gets programmed. If the driver does not
1450 * support changing the public address, fail the power
1451 * on procedure.
1452 */
1453 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1454 hdev->set_bdaddr)
1455 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1456 else
1457 ret = -EADDRNOTAVAIL;
1458 }
1459
1460 if (!ret) {
1461 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1462 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1463 ret = __hci_init(hdev);
1464 if (!ret && hdev->post_init)
1465 ret = hdev->post_init(hdev);
1466 }
1467 }
1468
1469 /* If the HCI Reset command is clearing all diagnostic settings,
1470 * then they need to be reprogrammed after the init procedure
1471 * completed.
1472 */
1473 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1474 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1475 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1476 ret = hdev->set_diag(hdev, true);
1477
1478 clear_bit(HCI_INIT, &hdev->flags);
1479
1480 if (!ret) {
1481 hci_dev_hold(hdev);
1482 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1483 hci_adv_instances_set_rpa_expired(hdev, true);
1484 set_bit(HCI_UP, &hdev->flags);
1485 hci_sock_dev_event(hdev, HCI_DEV_UP);
1486 hci_leds_update_powered(hdev, true);
1487 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1488 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1489 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1490 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1491 hci_dev_test_flag(hdev, HCI_MGMT) &&
1492 hdev->dev_type == HCI_PRIMARY) {
1493 ret = __hci_req_hci_power_on(hdev);
1494 mgmt_power_on(hdev, ret);
1495 }
1496 } else {
1497 /* Init failed, cleanup */
1498 flush_work(&hdev->tx_work);
1499 flush_work(&hdev->cmd_work);
1500 flush_work(&hdev->rx_work);
1501
1502 skb_queue_purge(&hdev->cmd_q);
1503 skb_queue_purge(&hdev->rx_q);
1504
1505 if (hdev->flush)
1506 hdev->flush(hdev);
1507
1508 if (hdev->sent_cmd) {
1509 kfree_skb(hdev->sent_cmd);
1510 hdev->sent_cmd = NULL;
1511 }
1512
1513 clear_bit(HCI_RUNNING, &hdev->flags);
1514 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1515
1516 hdev->close(hdev);
1517 hdev->flags &= BIT(HCI_RAW);
1518 }
1519
1520 done:
1521 hci_req_sync_unlock(hdev);
1522 return ret;
1523 }
1524
1525 /* ---- HCI ioctl helpers ---- */
1526
1527 int hci_dev_open(__u16 dev)
1528 {
1529 struct hci_dev *hdev;
1530 int err;
1531
1532 hdev = hci_dev_get(dev);
1533 if (!hdev)
1534 return -ENODEV;
1535
1536 /* Devices that are marked as unconfigured can only be powered
1537 * up as user channel. Trying to bring them up as normal devices
1538 * will result in a failure. Only user channel operation is
1539 * possible.
1540 *
1541 * When this function is called for a user channel, the flag
1542 * HCI_USER_CHANNEL will be set first before attempting to
1543 * open the device.
1544 */
1545 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1546 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1547 err = -EOPNOTSUPP;
1548 goto done;
1549 }
1550
1551 /* We need to ensure that no other power on/off work is pending
1552 * before proceeding to call hci_dev_do_open. This is
1553 * particularly important if the setup procedure has not yet
1554 * completed.
1555 */
1556 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1557 cancel_delayed_work(&hdev->power_off);
1558
1559 /* After this call it is guaranteed that the setup procedure
1560 * has finished. This means that error conditions like RFKILL
1561 * or no valid public or static random address apply.
1562 */
1563 flush_workqueue(hdev->req_workqueue);
1564
1565 /* For controllers not using the management interface and that
1566 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1567 * so that pairing works for them. Once the management interface
1568 * is in use this bit will be cleared again and userspace has
1569 * to explicitly enable it.
1570 */
1571 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1572 !hci_dev_test_flag(hdev, HCI_MGMT))
1573 hci_dev_set_flag(hdev, HCI_BONDABLE);
1574
1575 err = hci_dev_do_open(hdev);
1576
1577 done:
1578 hci_dev_put(hdev);
1579 return err;
1580 }
1581
1582 /* This function requires the caller holds hdev->lock */
1583 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1584 {
1585 struct hci_conn_params *p;
1586
1587 list_for_each_entry(p, &hdev->le_conn_params, list) {
1588 if (p->conn) {
1589 hci_conn_drop(p->conn);
1590 hci_conn_put(p->conn);
1591 p->conn = NULL;
1592 }
1593 list_del_init(&p->action);
1594 }
1595
1596 BT_DBG("All LE pending actions cleared");
1597 }
1598
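/* Power off the controller: flush pending work and queues, stop
 * discovery, drop connections and SMP state, optionally issue a final
 * HCI Reset and hand the device back to the driver via its close
 * callback.
 */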
1599 int hci_dev_do_close(struct hci_dev *hdev)
1600 {
1601 bool auto_off;
1602
1603 BT_DBG("%s %p", hdev->name, hdev);
1604
1605 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1606 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1607 test_bit(HCI_UP, &hdev->flags)) {
1608 /* Execute vendor specific shutdown routine */
1609 if (hdev->shutdown)
1610 hdev->shutdown(hdev);
1611 }
1612
1613 cancel_delayed_work(&hdev->power_off);
1614
1615 hci_request_cancel_all(hdev);
1616 hci_req_sync_lock(hdev);
1617
1618 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1619 cancel_delayed_work_sync(&hdev->cmd_timer);
1620 hci_req_sync_unlock(hdev);
1621 return 0;
1622 }
1623
1624 hci_leds_update_powered(hdev, false);
1625
1626 /* Flush RX and TX works */
1627 flush_work(&hdev->tx_work);
1628 flush_work(&hdev->rx_work);
1629
1630 if (hdev->discov_timeout > 0) {
1631 hdev->discov_timeout = 0;
1632 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1633 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1634 }
1635
1636 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1637 cancel_delayed_work(&hdev->service_cache);
1638
1639 if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1640 struct adv_info *adv_instance;
1641
1642 cancel_delayed_work_sync(&hdev->rpa_expired);
1643
1644 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1645 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1646 }
1647
1648 /* Avoid potential lockdep warnings from the *_flush() calls by
1649 * ensuring the workqueue is empty up front.
1650 */
1651 drain_workqueue(hdev->workqueue);
1652
1653 hci_dev_lock(hdev);
1654
1655 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1656
1657 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1658
1659 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1660 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1661 hci_dev_test_flag(hdev, HCI_MGMT))
1662 __mgmt_power_off(hdev);
1663
1664 hci_inquiry_cache_flush(hdev);
1665 hci_pend_le_actions_clear(hdev);
1666 hci_conn_hash_flush(hdev);
1667 hci_dev_unlock(hdev);
1668
1669 smp_unregister(hdev);
1670
1671 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1672
1673 if (hdev->flush)
1674 hdev->flush(hdev);
1675
1676 /* Reset device */
1677 skb_queue_purge(&hdev->cmd_q);
1678 atomic_set(&hdev->cmd_cnt, 1);
1679 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1680 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1681 set_bit(HCI_INIT, &hdev->flags);
1682 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1683 clear_bit(HCI_INIT, &hdev->flags);
1684 }
1685
1686 /* flush cmd work */
1687 flush_work(&hdev->cmd_work);
1688
1689 /* Drop queues */
1690 skb_queue_purge(&hdev->rx_q);
1691 skb_queue_purge(&hdev->cmd_q);
1692 skb_queue_purge(&hdev->raw_q);
1693
1694 /* Drop last sent command */
1695 if (hdev->sent_cmd) {
1696 cancel_delayed_work_sync(&hdev->cmd_timer);
1697 kfree_skb(hdev->sent_cmd);
1698 hdev->sent_cmd = NULL;
1699 }
1700
1701 clear_bit(HCI_RUNNING, &hdev->flags);
1702 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1703
1704 /* After this point our queues are empty
1705 * and no tasks are scheduled. */
1706 hdev->close(hdev);
1707
1708 /* Clear flags */
1709 hdev->flags &= BIT(HCI_RAW);
1710 hci_dev_clear_volatile_flags(hdev);
1711
1712 /* Controller radio is available but is currently powered down */
1713 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1714
1715 memset(hdev->eir, 0, sizeof(hdev->eir));
1716 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1717 bacpy(&hdev->random_addr, BDADDR_ANY);
1718
1719 hci_req_sync_unlock(hdev);
1720
1721 hci_dev_put(hdev);
1722 return 0;
1723 }
1724
1725 int hci_dev_close(__u16 dev)
1726 {
1727 struct hci_dev *hdev;
1728 int err;
1729
1730 hdev = hci_dev_get(dev);
1731 if (!hdev)
1732 return -ENODEV;
1733
1734 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1735 err = -EBUSY;
1736 goto done;
1737 }
1738
1739 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1740 cancel_delayed_work(&hdev->power_off);
1741
1742 err = hci_dev_do_close(hdev);
1743
1744 done:
1745 hci_dev_put(hdev);
1746 return err;
1747 }
1748
1749 static int hci_dev_do_reset(struct hci_dev *hdev)
1750 {
1751 int ret;
1752
1753 BT_DBG("%s %p", hdev->name, hdev);
1754
1755 hci_req_sync_lock(hdev);
1756
1757 /* Drop queues */
1758 skb_queue_purge(&hdev->rx_q);
1759 skb_queue_purge(&hdev->cmd_q);
1760
1761 /* Avoid potential lockdep warnings from the *_flush() calls by
1762 * ensuring the workqueue is empty up front.
1763 */
1764 drain_workqueue(hdev->workqueue);
1765
1766 hci_dev_lock(hdev);
1767 hci_inquiry_cache_flush(hdev);
1768 hci_conn_hash_flush(hdev);
1769 hci_dev_unlock(hdev);
1770
1771 if (hdev->flush)
1772 hdev->flush(hdev);
1773
1774 atomic_set(&hdev->cmd_cnt, 1);
1775 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1776
1777 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1778
1779 hci_req_sync_unlock(hdev);
1780 return ret;
1781 }
1782
1783 int hci_dev_reset(__u16 dev)
1784 {
1785 struct hci_dev *hdev;
1786 int err;
1787
1788 hdev = hci_dev_get(dev);
1789 if (!hdev)
1790 return -ENODEV;
1791
1792 if (!test_bit(HCI_UP, &hdev->flags)) {
1793 err = -ENETDOWN;
1794 goto done;
1795 }
1796
1797 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1798 err = -EBUSY;
1799 goto done;
1800 }
1801
1802 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1803 err = -EOPNOTSUPP;
1804 goto done;
1805 }
1806
1807 err = hci_dev_do_reset(hdev);
1808
1809 done:
1810 hci_dev_put(hdev);
1811 return err;
1812 }
1813
1814 int hci_dev_reset_stat(__u16 dev)
1815 {
1816 struct hci_dev *hdev;
1817 int ret = 0;
1818
1819 hdev = hci_dev_get(dev);
1820 if (!hdev)
1821 return -ENODEV;
1822
1823 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1824 ret = -EBUSY;
1825 goto done;
1826 }
1827
1828 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1829 ret = -EOPNOTSUPP;
1830 goto done;
1831 }
1832
1833 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1834
1835 done:
1836 hci_dev_put(hdev);
1837 return ret;
1838 }
1839
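/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with a
 * scan mode that was changed through the legacy HCISETSCAN ioctl and
 * let the management interface know about the new settings.
 */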
1840 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1841 {
1842 bool conn_changed, discov_changed;
1843
1844 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1845
1846 if ((scan & SCAN_PAGE))
1847 conn_changed = !hci_dev_test_and_set_flag(hdev,
1848 HCI_CONNECTABLE);
1849 else
1850 conn_changed = hci_dev_test_and_clear_flag(hdev,
1851 HCI_CONNECTABLE);
1852
1853 if ((scan & SCAN_INQUIRY)) {
1854 discov_changed = !hci_dev_test_and_set_flag(hdev,
1855 HCI_DISCOVERABLE);
1856 } else {
1857 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1858 discov_changed = hci_dev_test_and_clear_flag(hdev,
1859 HCI_DISCOVERABLE);
1860 }
1861
1862 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1863 return;
1864
1865 if (conn_changed || discov_changed) {
1866 /* In case this was disabled through mgmt */
1867 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1868
1869 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1870 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1871
1872 mgmt_new_settings(hdev);
1873 }
1874 }
1875
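/* Handler for the legacy HCI ioctls (HCISETAUTH, HCISETENCRYPT,
 * HCISETSCAN, HCISETLINKPOL, ...) that change basic controller
 * settings without going through the management interface.
 */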
1876 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1877 {
1878 struct hci_dev *hdev;
1879 struct hci_dev_req dr;
1880 int err = 0;
1881
1882 if (copy_from_user(&dr, arg, sizeof(dr)))
1883 return -EFAULT;
1884
1885 hdev = hci_dev_get(dr.dev_id);
1886 if (!hdev)
1887 return -ENODEV;
1888
1889 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1890 err = -EBUSY;
1891 goto done;
1892 }
1893
1894 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1895 err = -EOPNOTSUPP;
1896 goto done;
1897 }
1898
1899 if (hdev->dev_type != HCI_PRIMARY) {
1900 err = -EOPNOTSUPP;
1901 goto done;
1902 }
1903
1904 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1905 err = -EOPNOTSUPP;
1906 goto done;
1907 }
1908
1909 switch (cmd) {
1910 case HCISETAUTH:
1911 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1912 HCI_INIT_TIMEOUT, NULL);
1913 break;
1914
1915 case HCISETENCRYPT:
1916 if (!lmp_encrypt_capable(hdev)) {
1917 err = -EOPNOTSUPP;
1918 break;
1919 }
1920
1921 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1922 /* Auth must be enabled first */
1923 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1924 HCI_INIT_TIMEOUT, NULL);
1925 if (err)
1926 break;
1927 }
1928
1929 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1930 HCI_INIT_TIMEOUT, NULL);
1931 break;
1932
1933 case HCISETSCAN:
1934 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1935 HCI_INIT_TIMEOUT, NULL);
1936
1937 /* Ensure that the connectable and discoverable states
1938 * get correctly modified as this was a non-mgmt change.
1939 */
1940 if (!err)
1941 hci_update_scan_state(hdev, dr.dev_opt);
1942 break;
1943
1944 case HCISETLINKPOL:
1945 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1946 HCI_INIT_TIMEOUT, NULL);
1947 break;
1948
1949 case HCISETLINKMODE:
1950 hdev->link_mode = ((__u16) dr.dev_opt) &
1951 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1952 break;
1953
1954 case HCISETPTYPE:
1955 if (hdev->pkt_type == (__u16) dr.dev_opt)
1956 break;
1957
1958 hdev->pkt_type = (__u16) dr.dev_opt;
1959 mgmt_phy_configuration_changed(hdev, NULL);
1960 break;
1961
1962 case HCISETACLMTU:
1963 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1964 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1965 break;
1966
1967 case HCISETSCOMTU:
1968 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1969 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1970 break;
1971
1972 default:
1973 err = -EINVAL;
1974 break;
1975 }
1976
1977 done:
1978 hci_dev_put(hdev);
1979 return err;
1980 }
1981
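/* Handler for the HCIGETDEVLIST ioctl: copy the id and flags of up to
 * dev_num registered controllers back to userspace.
 */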
1982 int hci_get_dev_list(void __user *arg)
1983 {
1984 struct hci_dev *hdev;
1985 struct hci_dev_list_req *dl;
1986 struct hci_dev_req *dr;
1987 int n = 0, size, err;
1988 __u16 dev_num;
1989
1990 if (get_user(dev_num, (__u16 __user *) arg))
1991 return -EFAULT;
1992
1993 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1994 return -EINVAL;
1995
1996 size = sizeof(*dl) + dev_num * sizeof(*dr);
1997
1998 dl = kzalloc(size, GFP_KERNEL);
1999 if (!dl)
2000 return -ENOMEM;
2001
2002 dr = dl->dev_req;
2003
2004 read_lock(&hci_dev_list_lock);
2005 list_for_each_entry(hdev, &hci_dev_list, list) {
2006 unsigned long flags = hdev->flags;
2007
2008 /* When the auto-off is configured it means the transport
2009 * is running, but in that case still indicate that the
2010 * device is actually down.
2011 */
2012 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2013 flags &= ~BIT(HCI_UP);
2014
2015 (dr + n)->dev_id = hdev->id;
2016 (dr + n)->dev_opt = flags;
2017
2018 if (++n >= dev_num)
2019 break;
2020 }
2021 read_unlock(&hci_dev_list_lock);
2022
2023 dl->dev_num = n;
2024 size = sizeof(*dl) + n * sizeof(*dr);
2025
2026 err = copy_to_user(arg, dl, size);
2027 kfree(dl);
2028
2029 return err ? -EFAULT : 0;
2030 }
2031
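/* Handler for the HCIGETDEVINFO ioctl: fill in struct hci_dev_info for
 * one controller, reporting LE buffer sizes when BR/EDR is not supported.
 */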
2032 int hci_get_dev_info(void __user *arg)
2033 {
2034 struct hci_dev *hdev;
2035 struct hci_dev_info di;
2036 unsigned long flags;
2037 int err = 0;
2038
2039 if (copy_from_user(&di, arg, sizeof(di)))
2040 return -EFAULT;
2041
2042 hdev = hci_dev_get(di.dev_id);
2043 if (!hdev)
2044 return -ENODEV;
2045
2046 /* When the auto-off is configured it means the transport
2047 * is running, but in that case still indicate that the
2048 * device is actually down.
2049 */
2050 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2051 flags = hdev->flags & ~BIT(HCI_UP);
2052 else
2053 flags = hdev->flags;
2054
2055 strcpy(di.name, hdev->name);
2056 di.bdaddr = hdev->bdaddr;
2057 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2058 di.flags = flags;
2059 di.pkt_type = hdev->pkt_type;
2060 if (lmp_bredr_capable(hdev)) {
2061 di.acl_mtu = hdev->acl_mtu;
2062 di.acl_pkts = hdev->acl_pkts;
2063 di.sco_mtu = hdev->sco_mtu;
2064 di.sco_pkts = hdev->sco_pkts;
2065 } else {
2066 di.acl_mtu = hdev->le_mtu;
2067 di.acl_pkts = hdev->le_pkts;
2068 di.sco_mtu = 0;
2069 di.sco_pkts = 0;
2070 }
2071 di.link_policy = hdev->link_policy;
2072 di.link_mode = hdev->link_mode;
2073
2074 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2075 memcpy(&di.features, &hdev->features, sizeof(di.features));
2076
2077 if (copy_to_user(arg, &di, sizeof(di)))
2078 err = -EFAULT;
2079
2080 hci_dev_put(hdev);
2081
2082 return err;
2083 }
2084
2085 /* ---- Interface to HCI drivers ---- */
2086
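/* rfkill callback: when the switch blocks the radio the device is
 * closed unless it is still in setup or config state; unblocking only
 * clears HCI_RFKILLED and leaves powering back on to userspace.
 */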
2087 static int hci_rfkill_set_block(void *data, bool blocked)
2088 {
2089 struct hci_dev *hdev = data;
2090
2091 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2092
2093 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2094 return -EBUSY;
2095
2096 if (blocked) {
2097 hci_dev_set_flag(hdev, HCI_RFKILLED);
2098 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2099 !hci_dev_test_flag(hdev, HCI_CONFIG))
2100 hci_dev_do_close(hdev);
2101 } else {
2102 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2103 }
2104
2105 return 0;
2106 }
2107
2108 static const struct rfkill_ops hci_rfkill_ops = {
2109 .set_block = hci_rfkill_set_block,
2110 };
2111
2112 static void hci_power_on(struct work_struct *work)
2113 {
2114 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2115 int err;
2116
2117 BT_DBG("%s", hdev->name);
2118
2119 if (test_bit(HCI_UP, &hdev->flags) &&
2120 hci_dev_test_flag(hdev, HCI_MGMT) &&
2121 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2122 cancel_delayed_work(&hdev->power_off);
2123 hci_req_sync_lock(hdev);
2124 err = __hci_req_hci_power_on(hdev);
2125 hci_req_sync_unlock(hdev);
2126 mgmt_power_on(hdev, err);
2127 return;
2128 }
2129
2130 err = hci_dev_do_open(hdev);
2131 if (err < 0) {
2132 hci_dev_lock(hdev);
2133 mgmt_set_powered_failed(hdev, err);
2134 hci_dev_unlock(hdev);
2135 return;
2136 }
2137
2138 /* During the HCI setup phase, a few error conditions are
2139 * ignored and they need to be checked now. If they are still
2140 * valid, it is important to turn the device back off.
2141 */
2142 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2143 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2144 (hdev->dev_type == HCI_PRIMARY &&
2145 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2146 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2147 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2148 hci_dev_do_close(hdev);
2149 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2150 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2151 HCI_AUTO_OFF_TIMEOUT);
2152 }
2153
2154 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2155 /* For unconfigured devices, set the HCI_RAW flag
2156 * so that userspace can easily identify them.
2157 */
2158 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2159 set_bit(HCI_RAW, &hdev->flags);
2160
2161 /* For fully configured devices, this will send
2162 * the Index Added event. For unconfigured devices,
2163 		 * it will send the Unconfigured Index Added event.
2164 *
2165 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2166 		 * and no event will be sent.
2167 */
2168 mgmt_index_added(hdev);
2169 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2170 /* When the controller is now configured, then it
2171 * is important to clear the HCI_RAW flag.
2172 */
2173 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2174 clear_bit(HCI_RAW, &hdev->flags);
2175
2176 /* Powering on the controller with HCI_CONFIG set only
2177 * happens with the transition from unconfigured to
2178 * configured. This will send the Index Added event.
2179 */
2180 mgmt_index_added(hdev);
2181 }
2182 }
2183
2184 static void hci_power_off(struct work_struct *work)
2185 {
2186 struct hci_dev *hdev = container_of(work, struct hci_dev,
2187 power_off.work);
2188
2189 BT_DBG("%s", hdev->name);
2190
2191 hci_dev_do_close(hdev);
2192 }
2193
2194 static void hci_error_reset(struct work_struct *work)
2195 {
2196 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2197
2198 BT_DBG("%s", hdev->name);
2199
2200 if (hdev->hw_error)
2201 hdev->hw_error(hdev, hdev->hw_error_code);
2202 else
2203 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2204
2205 if (hci_dev_do_close(hdev))
2206 return;
2207
2208 hci_dev_do_open(hdev);
2209 }
2210
2211 void hci_uuids_clear(struct hci_dev *hdev)
2212 {
2213 struct bt_uuid *uuid, *tmp;
2214
2215 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2216 list_del(&uuid->list);
2217 kfree(uuid);
2218 }
2219 }
2220
2221 void hci_link_keys_clear(struct hci_dev *hdev)
2222 {
2223 struct link_key *key;
2224
2225 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2226 list_del_rcu(&key->list);
2227 kfree_rcu(key, rcu);
2228 }
2229 }
2230
2231 void hci_smp_ltks_clear(struct hci_dev *hdev)
2232 {
2233 struct smp_ltk *k;
2234
2235 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2236 list_del_rcu(&k->list);
2237 kfree_rcu(k, rcu);
2238 }
2239 }
2240
2241 void hci_smp_irks_clear(struct hci_dev *hdev)
2242 {
2243 struct smp_irk *k;
2244
2245 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2246 list_del_rcu(&k->list);
2247 kfree_rcu(k, rcu);
2248 }
2249 }
2250
2251 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2252 {
2253 struct link_key *k;
2254
2255 rcu_read_lock();
2256 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2257 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2258 rcu_read_unlock();
2259 return k;
2260 }
2261 }
2262 rcu_read_unlock();
2263
2264 return NULL;
2265 }
2266
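/* Decide whether a new BR/EDR link key should be stored persistently,
 * based on the key type and the bonding requirements used by both sides
 * during pairing.
 */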
2267 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2268 u8 key_type, u8 old_key_type)
2269 {
2270 /* Legacy key */
2271 if (key_type < 0x03)
2272 return true;
2273
2274 /* Debug keys are insecure so don't store them persistently */
2275 if (key_type == HCI_LK_DEBUG_COMBINATION)
2276 return false;
2277
2278 /* Changed combination key and there's no previous one */
2279 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2280 return false;
2281
2282 /* Security mode 3 case */
2283 if (!conn)
2284 return true;
2285
2286 /* BR/EDR key derived using SC from an LE link */
2287 if (conn->type == LE_LINK)
2288 return true;
2289
2290 /* Neither local nor remote side had no-bonding as requirement */
2291 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2292 return true;
2293
2294 /* Local side had dedicated bonding as requirement */
2295 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2296 return true;
2297
2298 /* Remote side had dedicated bonding as requirement */
2299 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2300 return true;
2301
2302 /* If none of the above criteria match, then don't store the key
2303 * persistently */
2304 return false;
2305 }
2306
2307 static u8 ltk_role(u8 type)
2308 {
2309 if (type == SMP_LTK)
2310 return HCI_ROLE_MASTER;
2311
2312 return HCI_ROLE_SLAVE;
2313 }
2314
2315 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2316 u8 addr_type, u8 role)
2317 {
2318 struct smp_ltk *k;
2319
2320 rcu_read_lock();
2321 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2322 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2323 continue;
2324
2325 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2326 rcu_read_unlock();
2327 return k;
2328 }
2329 }
2330 rcu_read_unlock();
2331
2332 return NULL;
2333 }
2334
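/* Find the IRK that resolves the given Resolvable Private Address. A
 * cached rpa match is tried first; otherwise every stored IRK is run
 * through smp_irk_matches() and the matching entry gets its rpa cache
 * updated.
 */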
2335 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2336 {
2337 struct smp_irk *irk;
2338
2339 rcu_read_lock();
2340 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2341 if (!bacmp(&irk->rpa, rpa)) {
2342 rcu_read_unlock();
2343 return irk;
2344 }
2345 }
2346
2347 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2348 if (smp_irk_matches(hdev, irk->val, rpa)) {
2349 bacpy(&irk->rpa, rpa);
2350 rcu_read_unlock();
2351 return irk;
2352 }
2353 }
2354 rcu_read_unlock();
2355
2356 return NULL;
2357 }
2358
2359 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2360 u8 addr_type)
2361 {
2362 struct smp_irk *irk;
2363
2364 /* Identity Address must be public or static random */
2365 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2366 return NULL;
2367
2368 rcu_read_lock();
2369 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2370 if (addr_type == irk->addr_type &&
2371 bacmp(bdaddr, &irk->bdaddr) == 0) {
2372 rcu_read_unlock();
2373 return irk;
2374 }
2375 }
2376 rcu_read_unlock();
2377
2378 return NULL;
2379 }
2380
2381 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2382 bdaddr_t *bdaddr, u8 *val, u8 type,
2383 u8 pin_len, bool *persistent)
2384 {
2385 struct link_key *key, *old_key;
2386 u8 old_key_type;
2387
2388 old_key = hci_find_link_key(hdev, bdaddr);
2389 if (old_key) {
2390 old_key_type = old_key->type;
2391 key = old_key;
2392 } else {
2393 old_key_type = conn ? conn->key_type : 0xff;
2394 key = kzalloc(sizeof(*key), GFP_KERNEL);
2395 if (!key)
2396 return NULL;
2397 list_add_rcu(&key->list, &hdev->link_keys);
2398 }
2399
2400 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2401
2402 /* Some buggy controller combinations generate a changed
2403 * combination key for legacy pairing even when there's no
2404 * previous key */
2405 if (type == HCI_LK_CHANGED_COMBINATION &&
2406 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2407 type = HCI_LK_COMBINATION;
2408 if (conn)
2409 conn->key_type = type;
2410 }
2411
2412 bacpy(&key->bdaddr, bdaddr);
2413 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2414 key->pin_len = pin_len;
2415
2416 if (type == HCI_LK_CHANGED_COMBINATION)
2417 key->type = old_key_type;
2418 else
2419 key->type = type;
2420
2421 if (persistent)
2422 *persistent = hci_persistent_key(hdev, conn, type,
2423 old_key_type);
2424
2425 return key;
2426 }
2427
2428 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2429 u8 addr_type, u8 type, u8 authenticated,
2430 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2431 {
2432 struct smp_ltk *key, *old_key;
2433 u8 role = ltk_role(type);
2434
2435 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2436 if (old_key)
2437 key = old_key;
2438 else {
2439 key = kzalloc(sizeof(*key), GFP_KERNEL);
2440 if (!key)
2441 return NULL;
2442 list_add_rcu(&key->list, &hdev->long_term_keys);
2443 }
2444
2445 bacpy(&key->bdaddr, bdaddr);
2446 key->bdaddr_type = addr_type;
2447 memcpy(key->val, tk, sizeof(key->val));
2448 key->authenticated = authenticated;
2449 key->ediv = ediv;
2450 key->rand = rand;
2451 key->enc_size = enc_size;
2452 key->type = type;
2453
2454 return key;
2455 }
2456
2457 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2458 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2459 {
2460 struct smp_irk *irk;
2461
2462 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2463 if (!irk) {
2464 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2465 if (!irk)
2466 return NULL;
2467
2468 bacpy(&irk->bdaddr, bdaddr);
2469 irk->addr_type = addr_type;
2470
2471 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2472 }
2473
2474 memcpy(irk->val, val, 16);
2475 bacpy(&irk->rpa, rpa);
2476
2477 return irk;
2478 }
2479
2480 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2481 {
2482 struct link_key *key;
2483
2484 key = hci_find_link_key(hdev, bdaddr);
2485 if (!key)
2486 return -ENOENT;
2487
2488 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2489
2490 list_del_rcu(&key->list);
2491 kfree_rcu(key, rcu);
2492
2493 return 0;
2494 }
2495
2496 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2497 {
2498 struct smp_ltk *k;
2499 int removed = 0;
2500
2501 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2502 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2503 continue;
2504
2505 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2506
2507 list_del_rcu(&k->list);
2508 kfree_rcu(k, rcu);
2509 removed++;
2510 }
2511
2512 return removed ? 0 : -ENOENT;
2513 }
2514
2515 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2516 {
2517 struct smp_irk *k;
2518
2519 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2520 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2521 continue;
2522
2523 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2524
2525 list_del_rcu(&k->list);
2526 kfree_rcu(k, rcu);
2527 }
2528 }
2529
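/* Check whether a bond exists with the given address: a stored link key
 * for BR/EDR, or a long term key (after resolving the address through a
 * matching IRK) for LE.
 */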
2530 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2531 {
2532 struct smp_ltk *k;
2533 struct smp_irk *irk;
2534 u8 addr_type;
2535
2536 if (type == BDADDR_BREDR) {
2537 if (hci_find_link_key(hdev, bdaddr))
2538 return true;
2539 return false;
2540 }
2541
2542 /* Convert to HCI addr type which struct smp_ltk uses */
2543 if (type == BDADDR_LE_PUBLIC)
2544 addr_type = ADDR_LE_DEV_PUBLIC;
2545 else
2546 addr_type = ADDR_LE_DEV_RANDOM;
2547
2548 irk = hci_get_irk(hdev, bdaddr, addr_type);
2549 if (irk) {
2550 bdaddr = &irk->bdaddr;
2551 addr_type = irk->addr_type;
2552 }
2553
2554 rcu_read_lock();
2555 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2556 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2557 rcu_read_unlock();
2558 return true;
2559 }
2560 }
2561 rcu_read_unlock();
2562
2563 return false;
2564 }
2565
2566 /* HCI command timer function */
2567 static void hci_cmd_timeout(struct work_struct *work)
2568 {
2569 struct hci_dev *hdev = container_of(work, struct hci_dev,
2570 cmd_timer.work);
2571
2572 if (hdev->sent_cmd) {
2573 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2574 u16 opcode = __le16_to_cpu(sent->opcode);
2575
2576 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2577 } else {
2578 bt_dev_err(hdev, "command tx timeout");
2579 }
2580
2581 atomic_set(&hdev->cmd_cnt, 1);
2582 queue_work(hdev->workqueue, &hdev->cmd_work);
2583 }
2584
2585 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2586 bdaddr_t *bdaddr, u8 bdaddr_type)
2587 {
2588 struct oob_data *data;
2589
2590 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2591 if (bacmp(bdaddr, &data->bdaddr) != 0)
2592 continue;
2593 if (data->bdaddr_type != bdaddr_type)
2594 continue;
2595 return data;
2596 }
2597
2598 return NULL;
2599 }
2600
2601 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2602 u8 bdaddr_type)
2603 {
2604 struct oob_data *data;
2605
2606 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2607 if (!data)
2608 return -ENOENT;
2609
2610 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2611
2612 list_del(&data->list);
2613 kfree(data);
2614
2615 return 0;
2616 }
2617
2618 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2619 {
2620 struct oob_data *data, *n;
2621
2622 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2623 list_del(&data->list);
2624 kfree(data);
2625 }
2626 }
2627
2628 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2629 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2630 u8 *hash256, u8 *rand256)
2631 {
2632 struct oob_data *data;
2633
2634 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2635 if (!data) {
2636 data = kmalloc(sizeof(*data), GFP_KERNEL);
2637 if (!data)
2638 return -ENOMEM;
2639
2640 bacpy(&data->bdaddr, bdaddr);
2641 data->bdaddr_type = bdaddr_type;
2642 list_add(&data->list, &hdev->remote_oob_data);
2643 }
2644
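	/* data->present is a bitmask of which OOB values are valid:
	 * 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both, 0x00 = none.
	 */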
2645 if (hash192 && rand192) {
2646 memcpy(data->hash192, hash192, sizeof(data->hash192));
2647 memcpy(data->rand192, rand192, sizeof(data->rand192));
2648 if (hash256 && rand256)
2649 data->present = 0x03;
2650 } else {
2651 memset(data->hash192, 0, sizeof(data->hash192));
2652 memset(data->rand192, 0, sizeof(data->rand192));
2653 if (hash256 && rand256)
2654 data->present = 0x02;
2655 else
2656 data->present = 0x00;
2657 }
2658
2659 if (hash256 && rand256) {
2660 memcpy(data->hash256, hash256, sizeof(data->hash256));
2661 memcpy(data->rand256, rand256, sizeof(data->rand256));
2662 } else {
2663 memset(data->hash256, 0, sizeof(data->hash256));
2664 memset(data->rand256, 0, sizeof(data->rand256));
2665 if (hash192 && rand192)
2666 data->present = 0x01;
2667 }
2668
2669 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2670
2671 return 0;
2672 }
2673
2674 /* This function requires the caller holds hdev->lock */
2675 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2676 {
2677 struct adv_info *adv_instance;
2678
2679 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2680 if (adv_instance->instance == instance)
2681 return adv_instance;
2682 }
2683
2684 return NULL;
2685 }
2686
2687 /* This function requires the caller holds hdev->lock */
2688 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2689 {
2690 struct adv_info *cur_instance;
2691
2692 cur_instance = hci_find_adv_instance(hdev, instance);
2693 if (!cur_instance)
2694 return NULL;
2695
2696 if (cur_instance == list_last_entry(&hdev->adv_instances,
2697 struct adv_info, list))
2698 return list_first_entry(&hdev->adv_instances,
2699 struct adv_info, list);
2700 else
2701 return list_next_entry(cur_instance, list);
2702 }
2703
2704 /* This function requires the caller holds hdev->lock */
2705 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2706 {
2707 struct adv_info *adv_instance;
2708
2709 adv_instance = hci_find_adv_instance(hdev, instance);
2710 if (!adv_instance)
2711 return -ENOENT;
2712
2713 BT_DBG("%s removing %dMR", hdev->name, instance);
2714
2715 if (hdev->cur_adv_instance == instance) {
2716 if (hdev->adv_instance_timeout) {
2717 cancel_delayed_work(&hdev->adv_instance_expire);
2718 hdev->adv_instance_timeout = 0;
2719 }
2720 hdev->cur_adv_instance = 0x00;
2721 }
2722
2723 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2724
2725 list_del(&adv_instance->list);
2726 kfree(adv_instance);
2727
2728 hdev->adv_instance_cnt--;
2729
2730 return 0;
2731 }
2732
2733 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2734 {
2735 struct adv_info *adv_instance, *n;
2736
2737 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2738 adv_instance->rpa_expired = rpa_expired;
2739 }
2740
2741 /* This function requires the caller holds hdev->lock */
2742 void hci_adv_instances_clear(struct hci_dev *hdev)
2743 {
2744 struct adv_info *adv_instance, *n;
2745
2746 if (hdev->adv_instance_timeout) {
2747 cancel_delayed_work(&hdev->adv_instance_expire);
2748 hdev->adv_instance_timeout = 0;
2749 }
2750
2751 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2752 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2753 list_del(&adv_instance->list);
2754 kfree(adv_instance);
2755 }
2756
2757 hdev->adv_instance_cnt = 0;
2758 hdev->cur_adv_instance = 0x00;
2759 }
2760
2761 static void adv_instance_rpa_expired(struct work_struct *work)
2762 {
2763 struct adv_info *adv_instance = container_of(work, struct adv_info,
2764 rpa_expired_cb.work);
2765
2766 BT_DBG("");
2767
2768 adv_instance->rpa_expired = true;
2769 }
2770
2771 /* This function requires the caller holds hdev->lock */
2772 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2773 u16 adv_data_len, u8 *adv_data,
2774 u16 scan_rsp_len, u8 *scan_rsp_data,
2775 u16 timeout, u16 duration)
2776 {
2777 struct adv_info *adv_instance;
2778
2779 adv_instance = hci_find_adv_instance(hdev, instance);
2780 if (adv_instance) {
2781 memset(adv_instance->adv_data, 0,
2782 sizeof(adv_instance->adv_data));
2783 memset(adv_instance->scan_rsp_data, 0,
2784 sizeof(adv_instance->scan_rsp_data));
2785 } else {
2786 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2787 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2788 return -EOVERFLOW;
2789
2790 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2791 if (!adv_instance)
2792 return -ENOMEM;
2793
2794 adv_instance->pending = true;
2795 adv_instance->instance = instance;
2796 list_add(&adv_instance->list, &hdev->adv_instances);
2797 hdev->adv_instance_cnt++;
2798 }
2799
2800 adv_instance->flags = flags;
2801 adv_instance->adv_data_len = adv_data_len;
2802 adv_instance->scan_rsp_len = scan_rsp_len;
2803
2804 if (adv_data_len)
2805 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2806
2807 if (scan_rsp_len)
2808 memcpy(adv_instance->scan_rsp_data,
2809 scan_rsp_data, scan_rsp_len);
2810
2811 adv_instance->timeout = timeout;
2812 adv_instance->remaining_time = timeout;
2813
2814 if (duration == 0)
2815 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2816 else
2817 adv_instance->duration = duration;
2818
2819 adv_instance->tx_power = HCI_TX_POWER_INVALID;
2820
2821 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2822 adv_instance_rpa_expired);
2823
2824 BT_DBG("%s for %dMR", hdev->name, instance);
2825
2826 return 0;
2827 }
2828
2829 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2830 bdaddr_t *bdaddr, u8 type)
2831 {
2832 struct bdaddr_list *b;
2833
2834 list_for_each_entry(b, bdaddr_list, list) {
2835 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2836 return b;
2837 }
2838
2839 return NULL;
2840 }
2841
2842 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2843 {
2844 struct bdaddr_list *b, *n;
2845
2846 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2847 list_del(&b->list);
2848 kfree(b);
2849 }
2850 }
2851
2852 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2853 {
2854 struct bdaddr_list *entry;
2855
2856 if (!bacmp(bdaddr, BDADDR_ANY))
2857 return -EBADF;
2858
2859 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2860 return -EEXIST;
2861
2862 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2863 if (!entry)
2864 return -ENOMEM;
2865
2866 bacpy(&entry->bdaddr, bdaddr);
2867 entry->bdaddr_type = type;
2868
2869 list_add(&entry->list, list);
2870
2871 return 0;
2872 }
2873
2874 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2875 {
2876 struct bdaddr_list *entry;
2877
2878 if (!bacmp(bdaddr, BDADDR_ANY)) {
2879 hci_bdaddr_list_clear(list);
2880 return 0;
2881 }
2882
2883 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2884 if (!entry)
2885 return -ENOENT;
2886
2887 list_del(&entry->list);
2888 kfree(entry);
2889
2890 return 0;
2891 }
2892
2893 /* This function requires the caller holds hdev->lock */
2894 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2895 bdaddr_t *addr, u8 addr_type)
2896 {
2897 struct hci_conn_params *params;
2898
2899 list_for_each_entry(params, &hdev->le_conn_params, list) {
2900 		if (bacmp(&params->addr, addr) == 0 &&
2901 params->addr_type == addr_type) {
2902 return params;
2903 }
2904 }
2905
2906 return NULL;
2907 }
2908
2909 /* This function requires the caller holds hdev->lock */
2910 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2911 bdaddr_t *addr, u8 addr_type)
2912 {
2913 struct hci_conn_params *param;
2914
2915 list_for_each_entry(param, list, action) {
2916 		if (bacmp(&param->addr, addr) == 0 &&
2917 param->addr_type == addr_type)
2918 return param;
2919 }
2920
2921 return NULL;
2922 }
2923
2924 /* This function requires the caller holds hdev->lock */
2925 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2926 bdaddr_t *addr, u8 addr_type)
2927 {
2928 struct hci_conn_params *params;
2929
2930 params = hci_conn_params_lookup(hdev, addr, addr_type);
2931 if (params)
2932 return params;
2933
2934 params = kzalloc(sizeof(*params), GFP_KERNEL);
2935 if (!params) {
2936 bt_dev_err(hdev, "out of memory");
2937 return NULL;
2938 }
2939
2940 	bacpy(&params->addr, addr);
2941 params->addr_type = addr_type;
2942
2943 	list_add(&params->list, &hdev->le_conn_params);
2944 	INIT_LIST_HEAD(&params->action);
2945
2946 params->conn_min_interval = hdev->le_conn_min_interval;
2947 params->conn_max_interval = hdev->le_conn_max_interval;
2948 params->conn_latency = hdev->le_conn_latency;
2949 params->supervision_timeout = hdev->le_supv_timeout;
2950 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2951
2952 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2953
2954 return params;
2955 }
2956
2957 static void hci_conn_params_free(struct hci_conn_params *params)
2958 {
2959 if (params->conn) {
2960 hci_conn_drop(params->conn);
2961 hci_conn_put(params->conn);
2962 }
2963
2964 	list_del(&params->action);
2965 	list_del(&params->list);
2966 kfree(params);
2967 }
2968
2969 /* This function requires the caller holds hdev->lock */
2970 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2971 {
2972 struct hci_conn_params *params;
2973
2974 params = hci_conn_params_lookup(hdev, addr, addr_type);
2975 if (!params)
2976 return;
2977
2978 hci_conn_params_free(params);
2979
2980 hci_update_background_scan(hdev);
2981
2982 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2983 }
2984
2985 /* This function requires the caller holds hdev->lock */
2986 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2987 {
2988 struct hci_conn_params *params, *tmp;
2989
2990 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2991 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2992 continue;
2993
2994 		/* If trying to establish a one-time connection to a disabled
2995 * device, leave the params, but mark them as just once.
2996 */
2997 if (params->explicit_connect) {
2998 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2999 continue;
3000 }
3001
3002 		list_del(&params->list);
3003 kfree(params);
3004 }
3005
3006 BT_DBG("All LE disabled connection parameters were removed");
3007 }
3008
3009 /* This function requires the caller holds hdev->lock */
3010 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3011 {
3012 struct hci_conn_params *params, *tmp;
3013
3014 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3015 hci_conn_params_free(params);
3016
3017 BT_DBG("All LE connection parameters were removed");
3018 }
3019
3020 /* Copy the Identity Address of the controller.
3021 *
3022 * If the controller has a public BD_ADDR, then by default use that one.
3023 * If this is a LE only controller without a public address, default to
3024 * the static random address.
3025 *
3026 * For debugging purposes it is possible to force controllers with a
3027 * public address to use the static random address instead.
3028 *
3029 * In case BR/EDR has been disabled on a dual-mode controller and
3030 * userspace has configured a static address, then that address
3031 * becomes the identity address instead of the public BR/EDR address.
3032 */
3033 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3034 u8 *bdaddr_type)
3035 {
3036 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3037 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3038 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3039 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3040 bacpy(bdaddr, &hdev->static_addr);
3041 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3042 } else {
3043 bacpy(bdaddr, &hdev->bdaddr);
3044 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3045 }
3046 }
3047
3048 /* Alloc HCI device */
3049 struct hci_dev *hci_alloc_dev(void)
3050 {
3051 struct hci_dev *hdev;
3052
3053 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3054 if (!hdev)
3055 return NULL;
3056
3057 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3058 hdev->esco_type = (ESCO_HV1);
3059 hdev->link_mode = (HCI_LM_ACCEPT);
3060 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3061 hdev->io_capability = 0x03; /* No Input No Output */
3062 hdev->manufacturer = 0xffff; /* Default to internal use */
3063 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3064 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3065 hdev->adv_instance_cnt = 0;
3066 hdev->cur_adv_instance = 0x00;
3067 hdev->adv_instance_timeout = 0;
3068
3069 hdev->sniff_max_interval = 800;
3070 hdev->sniff_min_interval = 80;
3071
3072 hdev->le_adv_channel_map = 0x07;
3073 hdev->le_adv_min_interval = 0x0800;
3074 hdev->le_adv_max_interval = 0x0800;
3075 hdev->le_scan_interval = 0x0060;
3076 hdev->le_scan_window = 0x0030;
3077 hdev->le_conn_min_interval = 0x0018;
3078 hdev->le_conn_max_interval = 0x0028;
3079 hdev->le_conn_latency = 0x0000;
3080 hdev->le_supv_timeout = 0x002a;
3081 hdev->le_def_tx_len = 0x001b;
3082 hdev->le_def_tx_time = 0x0148;
3083 hdev->le_max_tx_len = 0x001b;
3084 hdev->le_max_tx_time = 0x0148;
3085 hdev->le_max_rx_len = 0x001b;
3086 hdev->le_max_rx_time = 0x0148;
3087 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3088 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3089
3090 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3091 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3092 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3093 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3094
3095 mutex_init(&hdev->lock);
3096 mutex_init(&hdev->req_lock);
3097
3098 INIT_LIST_HEAD(&hdev->mgmt_pending);
3099 INIT_LIST_HEAD(&hdev->blacklist);
3100 INIT_LIST_HEAD(&hdev->whitelist);
3101 INIT_LIST_HEAD(&hdev->uuids);
3102 INIT_LIST_HEAD(&hdev->link_keys);
3103 INIT_LIST_HEAD(&hdev->long_term_keys);
3104 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3105 INIT_LIST_HEAD(&hdev->remote_oob_data);
3106 INIT_LIST_HEAD(&hdev->le_white_list);
3107 INIT_LIST_HEAD(&hdev->le_resolv_list);
3108 INIT_LIST_HEAD(&hdev->le_conn_params);
3109 INIT_LIST_HEAD(&hdev->pend_le_conns);
3110 INIT_LIST_HEAD(&hdev->pend_le_reports);
3111 INIT_LIST_HEAD(&hdev->conn_hash.list);
3112 INIT_LIST_HEAD(&hdev->adv_instances);
3113
3114 INIT_WORK(&hdev->rx_work, hci_rx_work);
3115 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3116 INIT_WORK(&hdev->tx_work, hci_tx_work);
3117 INIT_WORK(&hdev->power_on, hci_power_on);
3118 INIT_WORK(&hdev->error_reset, hci_error_reset);
3119
3120 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3121
3122 skb_queue_head_init(&hdev->rx_q);
3123 skb_queue_head_init(&hdev->cmd_q);
3124 skb_queue_head_init(&hdev->raw_q);
3125
3126 init_waitqueue_head(&hdev->req_wait_q);
3127
3128 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3129
3130 hci_request_setup(hdev);
3131
3132 hci_init_sysfs(hdev);
3133 discovery_init(hdev);
3134
3135 return hdev;
3136 }
3137 EXPORT_SYMBOL(hci_alloc_dev);
3138
3139 /* Free HCI device */
3140 void hci_free_dev(struct hci_dev *hdev)
3141 {
3142 /* will free via device release */
3143 put_device(&hdev->dev);
3144 }
3145 EXPORT_SYMBOL(hci_free_dev);
3146
3147 /* Register HCI device */
3148 int hci_register_dev(struct hci_dev *hdev)
3149 {
3150 int id, error;
3151
3152 if (!hdev->open || !hdev->close || !hdev->send)
3153 return -EINVAL;
3154
3155 /* Do not allow HCI_AMP devices to register at index 0,
3156 * so the index can be used as the AMP controller ID.
3157 */
3158 switch (hdev->dev_type) {
3159 case HCI_PRIMARY:
3160 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3161 break;
3162 case HCI_AMP:
3163 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3164 break;
3165 default:
3166 return -EINVAL;
3167 }
3168
3169 if (id < 0)
3170 return id;
3171
3172 sprintf(hdev->name, "hci%d", id);
3173 hdev->id = id;
3174
3175 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3176
3177 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3178 if (!hdev->workqueue) {
3179 error = -ENOMEM;
3180 goto err;
3181 }
3182
3183 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3184 hdev->name);
3185 if (!hdev->req_workqueue) {
3186 destroy_workqueue(hdev->workqueue);
3187 error = -ENOMEM;
3188 goto err;
3189 }
3190
3191 if (!IS_ERR_OR_NULL(bt_debugfs))
3192 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3193
3194 dev_set_name(&hdev->dev, "%s", hdev->name);
3195
3196 error = device_add(&hdev->dev);
3197 if (error < 0)
3198 goto err_wqueue;
3199
3200 hci_leds_init(hdev);
3201
3202 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3203 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3204 hdev);
3205 if (hdev->rfkill) {
3206 if (rfkill_register(hdev->rfkill) < 0) {
3207 rfkill_destroy(hdev->rfkill);
3208 hdev->rfkill = NULL;
3209 }
3210 }
3211
3212 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3213 hci_dev_set_flag(hdev, HCI_RFKILLED);
3214
3215 hci_dev_set_flag(hdev, HCI_SETUP);
3216 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3217
3218 if (hdev->dev_type == HCI_PRIMARY) {
3219 /* Assume BR/EDR support until proven otherwise (such as
3220 		 * through reading supported features during init).
3221 */
3222 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3223 }
3224
3225 write_lock(&hci_dev_list_lock);
3226 list_add(&hdev->list, &hci_dev_list);
3227 write_unlock(&hci_dev_list_lock);
3228
3229 /* Devices that are marked for raw-only usage are unconfigured
3230 * and should not be included in normal operation.
3231 */
3232 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3233 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3234
3235 hci_sock_dev_event(hdev, HCI_DEV_REG);
3236 hci_dev_hold(hdev);
3237
3238 queue_work(hdev->req_workqueue, &hdev->power_on);
3239
3240 return id;
3241
3242 err_wqueue:
3243 destroy_workqueue(hdev->workqueue);
3244 destroy_workqueue(hdev->req_workqueue);
3245 err:
3246 ida_simple_remove(&hci_index_ida, hdev->id);
3247
3248 return error;
3249 }
3250 EXPORT_SYMBOL(hci_register_dev);
3251
3252 /* Unregister HCI device */
3253 void hci_unregister_dev(struct hci_dev *hdev)
3254 {
3255 int id;
3256
3257 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3258
3259 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3260
3261 id = hdev->id;
3262
3263 write_lock(&hci_dev_list_lock);
3264 list_del(&hdev->list);
3265 write_unlock(&hci_dev_list_lock);
3266
3267 cancel_work_sync(&hdev->power_on);
3268
3269 hci_dev_do_close(hdev);
3270
3271 if (!test_bit(HCI_INIT, &hdev->flags) &&
3272 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3273 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3274 hci_dev_lock(hdev);
3275 mgmt_index_removed(hdev);
3276 hci_dev_unlock(hdev);
3277 }
3278
3279 /* mgmt_index_removed should take care of emptying the
3280 * pending list */
3281 BUG_ON(!list_empty(&hdev->mgmt_pending));
3282
3283 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3284
3285 if (hdev->rfkill) {
3286 rfkill_unregister(hdev->rfkill);
3287 rfkill_destroy(hdev->rfkill);
3288 }
3289
3290 device_del(&hdev->dev);
3291
3292 debugfs_remove_recursive(hdev->debugfs);
3293 kfree_const(hdev->hw_info);
3294 kfree_const(hdev->fw_info);
3295
3296 destroy_workqueue(hdev->workqueue);
3297 destroy_workqueue(hdev->req_workqueue);
3298
3299 hci_dev_lock(hdev);
3300 hci_bdaddr_list_clear(&hdev->blacklist);
3301 hci_bdaddr_list_clear(&hdev->whitelist);
3302 hci_uuids_clear(hdev);
3303 hci_link_keys_clear(hdev);
3304 hci_smp_ltks_clear(hdev);
3305 hci_smp_irks_clear(hdev);
3306 hci_remote_oob_data_clear(hdev);
3307 hci_adv_instances_clear(hdev);
3308 hci_bdaddr_list_clear(&hdev->le_white_list);
3309 hci_bdaddr_list_clear(&hdev->le_resolv_list);
3310 hci_conn_params_clear_all(hdev);
3311 hci_discovery_filter_clear(hdev);
3312 hci_dev_unlock(hdev);
3313
3314 hci_dev_put(hdev);
3315
3316 ida_simple_remove(&hci_index_ida, id);
3317 }
3318 EXPORT_SYMBOL(hci_unregister_dev);
3319
3320 /* Suspend HCI device */
3321 int hci_suspend_dev(struct hci_dev *hdev)
3322 {
3323 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3324 return 0;
3325 }
3326 EXPORT_SYMBOL(hci_suspend_dev);
3327
3328 /* Resume HCI device */
3329 int hci_resume_dev(struct hci_dev *hdev)
3330 {
3331 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3332 return 0;
3333 }
3334 EXPORT_SYMBOL(hci_resume_dev);
3335
3336 /* Reset HCI device */
3337 int hci_reset_dev(struct hci_dev *hdev)
3338 {
3339 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3340 struct sk_buff *skb;
3341
3342 skb = bt_skb_alloc(3, GFP_ATOMIC);
3343 if (!skb)
3344 return -ENOMEM;
3345
3346 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3347 skb_put_data(skb, hw_err, 3);
3348
3349 /* Send Hardware Error to upper stack */
3350 return hci_recv_frame(hdev, skb);
3351 }
3352 EXPORT_SYMBOL(hci_reset_dev);
3353
3354 /* Receive frame from HCI drivers */
3355 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3356 {
3357 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3358 && !test_bit(HCI_INIT, &hdev->flags))) {
3359 kfree_skb(skb);
3360 return -ENXIO;
3361 }
3362
3363 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3364 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3365 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3366 kfree_skb(skb);
3367 return -EINVAL;
3368 }
3369
3370 /* Incoming skb */
3371 bt_cb(skb)->incoming = 1;
3372
3373 /* Time stamp */
3374 __net_timestamp(skb);
3375
3376 skb_queue_tail(&hdev->rx_q, skb);
3377 queue_work(hdev->workqueue, &hdev->rx_work);
3378
3379 return 0;
3380 }
3381 EXPORT_SYMBOL(hci_recv_frame);
3382
3383 /* Receive diagnostic message from HCI drivers */
3384 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3385 {
3386 /* Mark as diagnostic packet */
3387 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3388
3389 /* Time stamp */
3390 __net_timestamp(skb);
3391
3392 skb_queue_tail(&hdev->rx_q, skb);
3393 queue_work(hdev->workqueue, &hdev->rx_work);
3394
3395 return 0;
3396 }
3397 EXPORT_SYMBOL(hci_recv_diag);
3398
3399 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3400 {
3401 va_list vargs;
3402
3403 va_start(vargs, fmt);
3404 kfree_const(hdev->hw_info);
3405 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3406 va_end(vargs);
3407 }
3408 EXPORT_SYMBOL(hci_set_hw_info);
3409
3410 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3411 {
3412 va_list vargs;
3413
3414 va_start(vargs, fmt);
3415 kfree_const(hdev->fw_info);
3416 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3417 va_end(vargs);
3418 }
3419 EXPORT_SYMBOL(hci_set_fw_info);
3420
3421 /* ---- Interface to upper protocols ---- */
3422
3423 int hci_register_cb(struct hci_cb *cb)
3424 {
3425 BT_DBG("%p name %s", cb, cb->name);
3426
3427 mutex_lock(&hci_cb_list_lock);
3428 list_add_tail(&cb->list, &hci_cb_list);
3429 mutex_unlock(&hci_cb_list_lock);
3430
3431 return 0;
3432 }
3433 EXPORT_SYMBOL(hci_register_cb);
3434
3435 int hci_unregister_cb(struct hci_cb *cb)
3436 {
3437 BT_DBG("%p name %s", cb, cb->name);
3438
3439 mutex_lock(&hci_cb_list_lock);
3440 list_del(&cb->list);
3441 mutex_unlock(&hci_cb_list_lock);
3442
3443 return 0;
3444 }
3445 EXPORT_SYMBOL(hci_unregister_cb);
3446
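/* Hand a fully built packet to the driver: timestamp it, mirror it to
 * the monitor socket (and to raw sockets when in promiscuous mode) and
 * then call the driver's send callback.
 */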
3447 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3448 {
3449 int err;
3450
3451 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3452 skb->len);
3453
3454 /* Time stamp */
3455 __net_timestamp(skb);
3456
3457 /* Send copy to monitor */
3458 hci_send_to_monitor(hdev, skb);
3459
3460 if (atomic_read(&hdev->promisc)) {
3461 /* Send copy to the sockets */
3462 hci_send_to_sock(hdev, skb);
3463 }
3464
3465 /* Get rid of skb owner, prior to sending to the driver. */
3466 skb_orphan(skb);
3467
3468 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3469 kfree_skb(skb);
3470 return;
3471 }
3472
3473 err = hdev->send(hdev, skb);
3474 if (err < 0) {
3475 bt_dev_err(hdev, "sending frame failed (%d)", err);
3476 kfree_skb(skb);
3477 }
3478 }
3479
3480 /* Send HCI command */
3481 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3482 const void *param)
3483 {
3484 struct sk_buff *skb;
3485
3486 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3487
3488 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3489 if (!skb) {
3490 bt_dev_err(hdev, "no memory for command");
3491 return -ENOMEM;
3492 }
3493
3494 /* Stand-alone HCI commands must be flagged as
3495 * single-command requests.
3496 */
3497 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3498
3499 skb_queue_tail(&hdev->cmd_q, skb);
3500 queue_work(hdev->workqueue, &hdev->cmd_work);
3501
3502 return 0;
3503 }
3504
3505 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3506 const void *param)
3507 {
3508 struct sk_buff *skb;
3509
3510 if (hci_opcode_ogf(opcode) != 0x3f) {
3511 /* A controller receiving a command shall respond with either
3512 * a Command Status Event or a Command Complete Event.
3513 * Therefore, all standard HCI commands must be sent via the
3514 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3515 * Some vendors do not comply with this rule for vendor-specific
3516 * commands and do not return any event. We want to support
3517 * unresponded commands for such cases only.
3518 */
3519 bt_dev_err(hdev, "unresponded command not supported");
3520 return -EINVAL;
3521 }
3522
3523 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3524 if (!skb) {
3525 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3526 opcode);
3527 return -ENOMEM;
3528 }
3529
3530 hci_send_frame(hdev, skb);
3531
3532 return 0;
3533 }
3534 EXPORT_SYMBOL(__hci_cmd_send);
3535
3536 /* Get data from the previously sent command */
3537 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3538 {
3539 struct hci_command_hdr *hdr;
3540
3541 if (!hdev->sent_cmd)
3542 return NULL;
3543
3544 hdr = (void *) hdev->sent_cmd->data;
3545
3546 if (hdr->opcode != cpu_to_le16(opcode))
3547 return NULL;
3548
3549 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3550
3551 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3552 }
3553
3554 /* Send HCI command and wait for command complete event */
3555 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3556 const void *param, u32 timeout)
3557 {
3558 struct sk_buff *skb;
3559
3560 if (!test_bit(HCI_UP, &hdev->flags))
3561 return ERR_PTR(-ENETDOWN);
3562
3563 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3564
3565 hci_req_sync_lock(hdev);
3566 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3567 hci_req_sync_unlock(hdev);
3568
3569 return skb;
3570 }
3571 EXPORT_SYMBOL(hci_cmd_sync);
3572
3573 /* Send ACL data */
3574 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3575 {
3576 struct hci_acl_hdr *hdr;
3577 int len = skb->len;
3578
3579 skb_push(skb, HCI_ACL_HDR_SIZE);
3580 skb_reset_transport_header(skb);
3581 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3582 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3583 hdr->dlen = cpu_to_le16(len);
3584 }
3585
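/* Add ACL headers and queue an outgoing ACL packet. An skb carrying a
 * frag_list is split up: the first fragment keeps the caller's boundary
 * flags and the remaining ones are queued with ACL_CONT, all added to
 * the queue atomically.
 */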
3586 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3587 struct sk_buff *skb, __u16 flags)
3588 {
3589 struct hci_conn *conn = chan->conn;
3590 struct hci_dev *hdev = conn->hdev;
3591 struct sk_buff *list;
3592
3593 skb->len = skb_headlen(skb);
3594 skb->data_len = 0;
3595
3596 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3597
3598 switch (hdev->dev_type) {
3599 case HCI_PRIMARY:
3600 hci_add_acl_hdr(skb, conn->handle, flags);
3601 break;
3602 case HCI_AMP:
3603 hci_add_acl_hdr(skb, chan->handle, flags);
3604 break;
3605 default:
3606 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3607 return;
3608 }
3609
3610 list = skb_shinfo(skb)->frag_list;
3611 if (!list) {
3612 /* Non fragmented */
3613 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3614
3615 skb_queue_tail(queue, skb);
3616 } else {
3617 /* Fragmented */
3618 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3619
3620 skb_shinfo(skb)->frag_list = NULL;
3621
3622 /* Queue all fragments atomically. We need to use spin_lock_bh
3623 * here because of 6LoWPAN links, as there this function is
3624 * called from softirq and using normal spin lock could cause
3625 * deadlocks.
3626 */
3627 spin_lock_bh(&queue->lock);
3628
3629 __skb_queue_tail(queue, skb);
3630
3631 flags &= ~ACL_START;
3632 flags |= ACL_CONT;
3633 do {
3634 skb = list; list = list->next;
3635
3636 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3637 hci_add_acl_hdr(skb, conn->handle, flags);
3638
3639 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3640
3641 __skb_queue_tail(queue, skb);
3642 } while (list);
3643
3644 spin_unlock_bh(&queue->lock);
3645 }
3646 }
3647
3648 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3649 {
3650 struct hci_dev *hdev = chan->conn->hdev;
3651
3652 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3653
3654 hci_queue_acl(chan, &chan->data_q, skb, flags);
3655
3656 queue_work(hdev->workqueue, &hdev->tx_work);
3657 }
3658
3659 /* Send SCO data */
3660 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3661 {
3662 struct hci_dev *hdev = conn->hdev;
3663 struct hci_sco_hdr hdr;
3664
3665 BT_DBG("%s len %d", hdev->name, skb->len);
3666
3667 hdr.handle = cpu_to_le16(conn->handle);
3668 hdr.dlen = skb->len;
3669
3670 skb_push(skb, HCI_SCO_HDR_SIZE);
3671 skb_reset_transport_header(skb);
3672 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3673
3674 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3675
3676 skb_queue_tail(&conn->data_q, skb);
3677 queue_work(hdev->workqueue, &hdev->tx_work);
3678 }
3679
3680 /* ---- HCI TX task (outgoing data) ---- */
3681
3682 /* HCI Connection scheduler */
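/* Pick the connection of the requested link type with the fewest packets
 * in flight and compute how many packets it may send this round based on
 * the free controller buffers.
 */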
3683 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3684 int *quote)
3685 {
3686 struct hci_conn_hash *h = &hdev->conn_hash;
3687 struct hci_conn *conn = NULL, *c;
3688 unsigned int num = 0, min = ~0;
3689
3690 /* We don't have to lock device here. Connections are always
3691 * added and removed with TX task disabled. */
3692
3693 rcu_read_lock();
3694
3695 list_for_each_entry_rcu(c, &h->list, list) {
3696 if (c->type != type || skb_queue_empty(&c->data_q))
3697 continue;
3698
3699 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3700 continue;
3701
3702 num++;
3703
3704 if (c->sent < min) {
3705 min = c->sent;
3706 conn = c;
3707 }
3708
3709 if (hci_conn_num(hdev, type) == num)
3710 break;
3711 }
3712
3713 rcu_read_unlock();
3714
3715 if (conn) {
3716 int cnt, q;
3717
3718 switch (conn->type) {
3719 case ACL_LINK:
3720 cnt = hdev->acl_cnt;
3721 break;
3722 case SCO_LINK:
3723 case ESCO_LINK:
3724 cnt = hdev->sco_cnt;
3725 break;
3726 case LE_LINK:
3727 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3728 break;
3729 default:
3730 cnt = 0;
3731 bt_dev_err(hdev, "unknown link type %d", conn->type);
3732 }
3733
3734 q = cnt / num;
3735 *quote = q ? q : 1;
3736 } else
3737 *quote = 0;
3738
3739 BT_DBG("conn %p quote %d", conn, *quote);
3740 return conn;
3741 }
3742
3743 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3744 {
3745 struct hci_conn_hash *h = &hdev->conn_hash;
3746 struct hci_conn *c;
3747
3748 bt_dev_err(hdev, "link tx timeout");
3749
3750 rcu_read_lock();
3751
3752 /* Kill stalled connections */
3753 list_for_each_entry_rcu(c, &h->list, list) {
3754 if (c->type == type && c->sent) {
3755 bt_dev_err(hdev, "killing stalled connection %pMR",
3756 &c->dst);
3757 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3758 }
3759 }
3760
3761 rcu_read_unlock();
3762 }
3763
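/* Pick the channel that should transmit next for the given link type:
 * among channels with data queued at the highest skb priority, choose
 * the one whose connection has the fewest packets outstanding, and
 * derive a per-round quote from the free controller buffers.
 */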
3764 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3765 int *quote)
3766 {
3767 struct hci_conn_hash *h = &hdev->conn_hash;
3768 struct hci_chan *chan = NULL;
3769 unsigned int num = 0, min = ~0, cur_prio = 0;
3770 struct hci_conn *conn;
3771 int cnt, q, conn_num = 0;
3772
3773 BT_DBG("%s", hdev->name);
3774
3775 rcu_read_lock();
3776
3777 list_for_each_entry_rcu(conn, &h->list, list) {
3778 struct hci_chan *tmp;
3779
3780 if (conn->type != type)
3781 continue;
3782
3783 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3784 continue;
3785
3786 conn_num++;
3787
3788 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3789 struct sk_buff *skb;
3790
3791 if (skb_queue_empty(&tmp->data_q))
3792 continue;
3793
3794 skb = skb_peek(&tmp->data_q);
3795 if (skb->priority < cur_prio)
3796 continue;
3797
3798 if (skb->priority > cur_prio) {
3799 num = 0;
3800 min = ~0;
3801 cur_prio = skb->priority;
3802 }
3803
3804 num++;
3805
3806 if (conn->sent < min) {
3807 min = conn->sent;
3808 chan = tmp;
3809 }
3810 }
3811
3812 if (hci_conn_num(hdev, type) == conn_num)
3813 break;
3814 }
3815
3816 rcu_read_unlock();
3817
3818 if (!chan)
3819 return NULL;
3820
3821 switch (chan->conn->type) {
3822 case ACL_LINK:
3823 cnt = hdev->acl_cnt;
3824 break;
3825 case AMP_LINK:
3826 cnt = hdev->block_cnt;
3827 break;
3828 case SCO_LINK:
3829 case ESCO_LINK:
3830 cnt = hdev->sco_cnt;
3831 break;
3832 case LE_LINK:
3833 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3834 break;
3835 default:
3836 cnt = 0;
3837 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3838 }
3839
3840 q = cnt / num;
3841 *quote = q ? q : 1;
3842 BT_DBG("chan %p quote %d", chan, *quote);
3843 return chan;
3844 }
3845
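/* After a TX round, promote the priority of skbs still queued on
 * channels that did not get to send anything, so lower priority traffic
 * is not starved indefinitely.
 */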
3846 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3847 {
3848 struct hci_conn_hash *h = &hdev->conn_hash;
3849 struct hci_conn *conn;
3850 int num = 0;
3851
3852 BT_DBG("%s", hdev->name);
3853
3854 rcu_read_lock();
3855
3856 list_for_each_entry_rcu(conn, &h->list, list) {
3857 struct hci_chan *chan;
3858
3859 if (conn->type != type)
3860 continue;
3861
3862 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3863 continue;
3864
3865 num++;
3866
3867 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3868 struct sk_buff *skb;
3869
3870 if (chan->sent) {
3871 chan->sent = 0;
3872 continue;
3873 }
3874
3875 if (skb_queue_empty(&chan->data_q))
3876 continue;
3877
3878 skb = skb_peek(&chan->data_q);
3879 if (skb->priority >= HCI_PRIO_MAX - 1)
3880 continue;
3881
3882 skb->priority = HCI_PRIO_MAX - 1;
3883
3884 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3885 skb->priority);
3886 }
3887
3888 if (hci_conn_num(hdev, type) == num)
3889 break;
3890 }
3891
3892 rcu_read_unlock();
3893
3894 }
3895
3896 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3897 {
3898 /* Calculate count of blocks used by this packet */
3899 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3900 }
3901
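/* Shared by the ACL schedulers: if no buffers are free (cnt == 0) and
 * the last ACL frame went out more than HCI_ACL_TX_TIMEOUT ago, the
 * controller has stopped returning completions, so declare a link
 * transmit timeout.  Skipped while the controller is HCI_UNCONFIGURED.
 */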
3902 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3903 {
3904 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3905 /* ACL tx timeout must be longer than maximum
3906 * link supervision timeout (40.9 seconds) */
3907 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3908 HCI_ACL_TX_TIMEOUT))
3909 hci_link_tx_to(hdev, ACL_LINK);
3910 }
3911 }
3912
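/* Packet-based ACL scheduler: while ACL buffers remain, drain the
 * channel chosen by hci_chan_sent() up to its quote, stopping early if
 * a lower-priority skb reaches the head of the queue.  If any buffers
 * were consumed, channel priorities are recalculated afterwards.
 */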
3913 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3914 {
3915 unsigned int cnt = hdev->acl_cnt;
3916 struct hci_chan *chan;
3917 struct sk_buff *skb;
3918 int quote;
3919
3920 __check_timeout(hdev, cnt);
3921
3922 while (hdev->acl_cnt &&
3923 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3924 u32 priority = (skb_peek(&chan->data_q))->priority;
3925 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3926 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3927 skb->len, skb->priority);
3928
3929 /* Stop if priority has changed */
3930 if (skb->priority < priority)
3931 break;
3932
3933 skb = skb_dequeue(&chan->data_q);
3934
3935 hci_conn_enter_active_mode(chan->conn,
3936 bt_cb(skb)->force_active);
3937
3938 hci_send_frame(hdev, skb);
3939 hdev->acl_last_tx = jiffies;
3940
3941 hdev->acl_cnt--;
3942 chan->sent++;
3943 chan->conn->sent++;
3944 }
3945 }
3946
3947 if (cnt != hdev->acl_cnt)
3948 hci_prio_recalculate(hdev, ACL_LINK);
3949 }
3950
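/* Block-based ACL scheduler (HCI_FLOW_CTL_MODE_BLOCK_BASED): same idea
 * as hci_sched_acl_pkt(), but buffer usage is accounted in data blocks
 * rather than packets, and AMP controllers schedule AMP_LINK
 * connections instead of ACL_LINK.
 */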
3951 static void hci_sched_acl_blk(struct hci_dev *hdev)
3952 {
3953 unsigned int cnt = hdev->block_cnt;
3954 struct hci_chan *chan;
3955 struct sk_buff *skb;
3956 int quote;
3957 u8 type;
3958
3959 __check_timeout(hdev, cnt);
3960
3961 BT_DBG("%s", hdev->name);
3962
3963 if (hdev->dev_type == HCI_AMP)
3964 type = AMP_LINK;
3965 else
3966 type = ACL_LINK;
3967
3968 while (hdev->block_cnt > 0 &&
3969 	       (chan = hci_chan_sent(hdev, type, &quote))) {
3970 u32 priority = (skb_peek(&chan->data_q))->priority;
3971 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3972 int blocks;
3973
3974 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3975 skb->len, skb->priority);
3976
3977 /* Stop if priority has changed */
3978 if (skb->priority < priority)
3979 break;
3980
3981 skb = skb_dequeue(&chan->data_q);
3982
3983 blocks = __get_blocks(hdev, skb);
3984 if (blocks > hdev->block_cnt)
3985 return;
3986
3987 hci_conn_enter_active_mode(chan->conn,
3988 bt_cb(skb)->force_active);
3989
3990 hci_send_frame(hdev, skb);
3991 hdev->acl_last_tx = jiffies;
3992
3993 hdev->block_cnt -= blocks;
3994 quote -= blocks;
3995
3996 chan->sent += blocks;
3997 chan->conn->sent += blocks;
3998 }
3999 }
4000
4001 if (cnt != hdev->block_cnt)
4002 hci_prio_recalculate(hdev, type);
4003 }
4004
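/* Dispatch ACL scheduling according to the controller's flow control
 * mode: per-packet or per-block accounting.
 */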
4005 static void hci_sched_acl(struct hci_dev *hdev)
4006 {
4007 BT_DBG("%s", hdev->name);
4008
4009 /* No ACL link over BR/EDR controller */
4010 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4011 return;
4012
4013 /* No AMP link over AMP controller */
4014 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4015 return;
4016
4017 switch (hdev->flow_ctl_mode) {
4018 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4019 hci_sched_acl_pkt(hdev);
4020 break;
4021
4022 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4023 hci_sched_acl_blk(hdev);
4024 break;
4025 }
4026 }
4027
4028 /* Schedule SCO */
4029 static void hci_sched_sco(struct hci_dev *hdev)
4030 {
4031 struct hci_conn *conn;
4032 struct sk_buff *skb;
4033 int quote;
4034
4035 BT_DBG("%s", hdev->name);
4036
4037 if (!hci_conn_num(hdev, SCO_LINK))
4038 return;
4039
4040 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4041 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4042 BT_DBG("skb %p len %d", skb, skb->len);
4043 hci_send_frame(hdev, skb);
4044
4045 conn->sent++;
4046 if (conn->sent == ~0)
4047 conn->sent = 0;
4048 }
4049 }
4050 }
4051
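/* Schedule eSCO */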
4052 static void hci_sched_esco(struct hci_dev *hdev)
4053 {
4054 struct hci_conn *conn;
4055 struct sk_buff *skb;
4056 int quote;
4057
4058 BT_DBG("%s", hdev->name);
4059
4060 if (!hci_conn_num(hdev, ESCO_LINK))
4061 return;
4062
4063 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4064 						     &quote))) {
4065 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4066 BT_DBG("skb %p len %d", skb, skb->len);
4067 hci_send_frame(hdev, skb);
4068
4069 conn->sent++;
4070 if (conn->sent == ~0)
4071 conn->sent = 0;
4072 }
4073 }
4074 }
4075
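/* Schedule LE data.  Controllers with a dedicated LE buffer pool
 * (le_pkts != 0) consume le_cnt; otherwise LE traffic shares the ACL
 * buffers.  Channel selection is the same priority-aware scheme used
 * for ACL, with an analogous transmit timeout check.
 */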
4076 static void hci_sched_le(struct hci_dev *hdev)
4077 {
4078 struct hci_chan *chan;
4079 struct sk_buff *skb;
4080 int quote, cnt, tmp;
4081
4082 BT_DBG("%s", hdev->name);
4083
4084 if (!hci_conn_num(hdev, LE_LINK))
4085 return;
4086
4087 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4088 /* LE tx timeout must be longer than maximum
4089 * link supervision timeout (40.9 seconds) */
4090 if (!hdev->le_cnt && hdev->le_pkts &&
4091 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4092 hci_link_tx_to(hdev, LE_LINK);
4093 }
4094
4095 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4096 tmp = cnt;
4097 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4098 u32 priority = (skb_peek(&chan->data_q))->priority;
4099 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4100 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4101 skb->len, skb->priority);
4102
4103 /* Stop if priority has changed */
4104 if (skb->priority < priority)
4105 break;
4106
4107 skb = skb_dequeue(&chan->data_q);
4108
4109 hci_send_frame(hdev, skb);
4110 hdev->le_last_tx = jiffies;
4111
4112 cnt--;
4113 chan->sent++;
4114 chan->conn->sent++;
4115 }
4116 }
4117
4118 if (hdev->le_pkts)
4119 hdev->le_cnt = cnt;
4120 else
4121 hdev->acl_cnt = cnt;
4122
4123 if (cnt != tmp)
4124 hci_prio_recalculate(hdev, LE_LINK);
4125 }
4126
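/* TX work callback: run the per-type schedulers (ACL, SCO, eSCO, LE)
 * unless the device is in user channel mode.  Raw (unknown type)
 * packets in raw_q are flushed straight to the driver regardless.
 */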
4127 static void hci_tx_work(struct work_struct *work)
4128 {
4129 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4130 struct sk_buff *skb;
4131
4132 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4133 hdev->sco_cnt, hdev->le_cnt);
4134
4135 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4136 /* Schedule queues and send stuff to HCI driver */
4137 hci_sched_acl(hdev);
4138 hci_sched_sco(hdev);
4139 hci_sched_esco(hdev);
4140 hci_sched_le(hdev);
4141 }
4142
4143 /* Send next queued raw (unknown type) packet */
4144 while ((skb = skb_dequeue(&hdev->raw_q)))
4145 hci_send_frame(hdev, skb);
4146 }
4147
4148 /* ----- HCI RX task (incoming data processing) ----- */
4149
4150 /* ACL data packet */
4151 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4152 {
4153 struct hci_acl_hdr *hdr = (void *) skb->data;
4154 struct hci_conn *conn;
4155 __u16 handle, flags;
4156
4157 skb_pull(skb, HCI_ACL_HDR_SIZE);
4158
4159 handle = __le16_to_cpu(hdr->handle);
4160 flags = hci_flags(handle);
4161 handle = hci_handle(handle);
4162
4163 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4164 handle, flags);
4165
4166 hdev->stat.acl_rx++;
4167
4168 hci_dev_lock(hdev);
4169 conn = hci_conn_hash_lookup_handle(hdev, handle);
4170 hci_dev_unlock(hdev);
4171
4172 if (conn) {
4173 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4174
4175 /* Send to upper protocol */
4176 l2cap_recv_acldata(conn, skb, flags);
4177 return;
4178 } else {
4179 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4180 handle);
4181 }
4182
4183 kfree_skb(skb);
4184 }
4185
4186 /* SCO data packet */
4187 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4188 {
4189 struct hci_sco_hdr *hdr = (void *) skb->data;
4190 struct hci_conn *conn;
4191 __u16 handle;
4192
4193 skb_pull(skb, HCI_SCO_HDR_SIZE);
4194
4195 handle = __le16_to_cpu(hdr->handle);
4196
4197 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4198
4199 hdev->stat.sco_rx++;
4200
4201 hci_dev_lock(hdev);
4202 conn = hci_conn_hash_lookup_handle(hdev, handle);
4203 hci_dev_unlock(hdev);
4204
4205 if (conn) {
4206 /* Send to upper protocol */
4207 sco_recv_scodata(conn, skb);
4208 return;
4209 } else {
4210 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4211 handle);
4212 }
4213
4214 kfree_skb(skb);
4215 }
4216
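/* The current request is complete when the command queue is empty or
 * the next queued command is flagged HCI_REQ_START, i.e. it begins a
 * new request.
 */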
4217 static bool hci_req_is_complete(struct hci_dev *hdev)
4218 {
4219 struct sk_buff *skb;
4220
4221 skb = skb_peek(&hdev->cmd_q);
4222 if (!skb)
4223 return true;
4224
4225 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4226 }
4227
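/* Requeue a clone of the last sent command at the head of the command
 * queue so it is transmitted again; HCI_OP_RESET itself is never
 * resent.
 */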
4228 static void hci_resend_last(struct hci_dev *hdev)
4229 {
4230 struct hci_command_hdr *sent;
4231 struct sk_buff *skb;
4232 u16 opcode;
4233
4234 if (!hdev->sent_cmd)
4235 return;
4236
4237 sent = (void *) hdev->sent_cmd->data;
4238 opcode = __le16_to_cpu(sent->opcode);
4239 if (opcode == HCI_OP_RESET)
4240 return;
4241
4242 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4243 if (!skb)
4244 return;
4245
4246 skb_queue_head(&hdev->cmd_q, skb);
4247 queue_work(hdev->workqueue, &hdev->cmd_work);
4248 }
4249
4250 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4251 hci_req_complete_t *req_complete,
4252 hci_req_complete_skb_t *req_complete_skb)
4253 {
4254 struct sk_buff *skb;
4255 unsigned long flags;
4256
4257 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4258
4259 /* If the completed command doesn't match the last one that was
4260 	 * sent, we need to do special handling of it.
4261 */
4262 if (!hci_sent_cmd_data(hdev, opcode)) {
4263 /* Some CSR based controllers generate a spontaneous
4264 * reset complete event during init and any pending
4265 * command will never be completed. In such a case we
4266 * need to resend whatever was the last sent
4267 * command.
4268 */
4269 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4270 hci_resend_last(hdev);
4271
4272 return;
4273 }
4274
4275 	/* If the command succeeded and there are still more commands in
4276 	 * this request, the request is not yet complete.
4277 */
4278 if (!status && !hci_req_is_complete(hdev))
4279 return;
4280
4281 	/* If this was the last command in a request, the complete
4282 * callback would be found in hdev->sent_cmd instead of the
4283 * command queue (hdev->cmd_q).
4284 */
4285 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4286 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4287 return;
4288 }
4289
4290 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4291 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4292 return;
4293 }
4294
4295 /* Remove all pending commands belonging to this request */
4296 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4297 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4298 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4299 __skb_queue_head(&hdev->cmd_q, skb);
4300 break;
4301 }
4302
4303 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4304 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4305 else
4306 *req_complete = bt_cb(skb)->hci.req_complete;
4307 kfree_skb(skb);
4308 }
4309 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4310 }
4311
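/* RX work callback: drain the receive queue, mirroring each packet to
 * the monitor socket (and to raw sockets when in promiscuous mode).
 * Everything is dropped in user channel mode, data packets are dropped
 * during init, and the rest is dispatched by packet type to the event,
 * ACL data or SCO data handlers.
 */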
4312 static void hci_rx_work(struct work_struct *work)
4313 {
4314 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4315 struct sk_buff *skb;
4316
4317 BT_DBG("%s", hdev->name);
4318
4319 while ((skb = skb_dequeue(&hdev->rx_q))) {
4320 /* Send copy to monitor */
4321 hci_send_to_monitor(hdev, skb);
4322
4323 if (atomic_read(&hdev->promisc)) {
4324 /* Send copy to the sockets */
4325 hci_send_to_sock(hdev, skb);
4326 }
4327
4328 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4329 kfree_skb(skb);
4330 continue;
4331 }
4332
4333 if (test_bit(HCI_INIT, &hdev->flags)) {
4334 			/* Don't process data packets in this state. */
4335 switch (hci_skb_pkt_type(skb)) {
4336 case HCI_ACLDATA_PKT:
4337 case HCI_SCODATA_PKT:
4338 kfree_skb(skb);
4339 continue;
4340 }
4341 }
4342
4343 /* Process frame */
4344 switch (hci_skb_pkt_type(skb)) {
4345 case HCI_EVENT_PKT:
4346 BT_DBG("%s Event packet", hdev->name);
4347 hci_event_packet(hdev, skb);
4348 break;
4349
4350 case HCI_ACLDATA_PKT:
4351 BT_DBG("%s ACL data packet", hdev->name);
4352 hci_acldata_packet(hdev, skb);
4353 break;
4354
4355 case HCI_SCODATA_PKT:
4356 BT_DBG("%s SCO data packet", hdev->name);
4357 hci_scodata_packet(hdev, skb);
4358 break;
4359
4360 default:
4361 kfree_skb(skb);
4362 break;
4363 }
4364 }
4365 }
4366
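/* Command work callback: when the controller has a command credit
 * (cmd_cnt), dequeue the next queued command, keep a clone in
 * hdev->sent_cmd for completion matching, send it and arm the command
 * timeout (the timeout is cancelled instead while HCI_RESET is in
 * progress).  If the clone cannot be allocated, the command is put
 * back and the work is rescheduled.
 */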
4367 static void hci_cmd_work(struct work_struct *work)
4368 {
4369 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4370 struct sk_buff *skb;
4371
4372 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4373 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4374
4375 /* Send queued commands */
4376 if (atomic_read(&hdev->cmd_cnt)) {
4377 skb = skb_dequeue(&hdev->cmd_q);
4378 if (!skb)
4379 return;
4380
4381 kfree_skb(hdev->sent_cmd);
4382
4383 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4384 if (hdev->sent_cmd) {
4385 atomic_dec(&hdev->cmd_cnt);
4386 hci_send_frame(hdev, skb);
4387 if (test_bit(HCI_RESET, &hdev->flags))
4388 cancel_delayed_work(&hdev->cmd_timer);
4389 else
4390 schedule_delayed_work(&hdev->cmd_timer,
4391 HCI_CMD_TIMEOUT);
4392 } else {
4393 skb_queue_head(&hdev->cmd_q, skb);
4394 queue_work(hdev->workqueue, &hdev->cmd_work);
4395 }
4396 }
4397 }
4398