1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
41
42 /* Handle HCI Event packets */
43
hci_cc_inquiry_cancel(struct hci_dev * hdev,struct sk_buff * skb)44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
45 {
46 __u8 status = *((__u8 *) skb->data);
47
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
49
50 if (status)
51 return;
52
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
56
57 hci_dev_lock(hdev);
58 /* Set discovery state to stopped if we're not doing LE active
59 * scanning.
60 */
61 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
62 hdev->le_scan_type != LE_SCAN_ACTIVE)
63 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
64 hci_dev_unlock(hdev);
65
66 hci_conn_check_pending(hdev);
67 }
68
hci_cc_periodic_inq(struct hci_dev * hdev,struct sk_buff * skb)69 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
70 {
71 __u8 status = *((__u8 *) skb->data);
72
73 BT_DBG("%s status 0x%2.2x", hdev->name, status);
74
75 if (status)
76 return;
77
78 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
79 }
80
hci_cc_exit_periodic_inq(struct hci_dev * hdev,struct sk_buff * skb)81 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
82 {
83 __u8 status = *((__u8 *) skb->data);
84
85 BT_DBG("%s status 0x%2.2x", hdev->name, status);
86
87 if (status)
88 return;
89
90 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
91
92 hci_conn_check_pending(hdev);
93 }
94
/* Command Complete for HCI_Remote_Name_Request_Cancel.
 * No state needs updating here; only log the call.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
100
hci_cc_role_discovery(struct hci_dev * hdev,struct sk_buff * skb)101 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
102 {
103 struct hci_rp_role_discovery *rp = (void *) skb->data;
104 struct hci_conn *conn;
105
106 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
107
108 if (rp->status)
109 return;
110
111 hci_dev_lock(hdev);
112
113 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
114 if (conn)
115 conn->role = rp->role;
116
117 hci_dev_unlock(hdev);
118 }
119
hci_cc_read_link_policy(struct hci_dev * hdev,struct sk_buff * skb)120 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
121 {
122 struct hci_rp_read_link_policy *rp = (void *) skb->data;
123 struct hci_conn *conn;
124
125 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
126
127 if (rp->status)
128 return;
129
130 hci_dev_lock(hdev);
131
132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 if (conn)
134 conn->link_policy = __le16_to_cpu(rp->policy);
135
136 hci_dev_unlock(hdev);
137 }
138
hci_cc_write_link_policy(struct hci_dev * hdev,struct sk_buff * skb)139 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
140 {
141 struct hci_rp_write_link_policy *rp = (void *) skb->data;
142 struct hci_conn *conn;
143 void *sent;
144
145 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
146
147 if (rp->status)
148 return;
149
150 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
151 if (!sent)
152 return;
153
154 hci_dev_lock(hdev);
155
156 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
157 if (conn)
158 conn->link_policy = get_unaligned_le16(sent + 2);
159
160 hci_dev_unlock(hdev);
161 }
162
hci_cc_read_def_link_policy(struct hci_dev * hdev,struct sk_buff * skb)163 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
164 struct sk_buff *skb)
165 {
166 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
167
168 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
169
170 if (rp->status)
171 return;
172
173 hdev->link_policy = __le16_to_cpu(rp->policy);
174 }
175
hci_cc_write_def_link_policy(struct hci_dev * hdev,struct sk_buff * skb)176 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
177 struct sk_buff *skb)
178 {
179 __u8 status = *((__u8 *) skb->data);
180 void *sent;
181
182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
183
184 if (status)
185 return;
186
187 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
188 if (!sent)
189 return;
190
191 hdev->link_policy = get_unaligned_le16(sent);
192 }
193
/* Command Complete for HCI_Reset.
 *
 * On success the controller has dropped all of its state, so every
 * non-persistent piece of host-side state that mirrors the controller
 * must be reset to defaults as well.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The reset command is no longer outstanding, regardless of
	 * whether it succeeded.
	 */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	/* TX power values are unknown until they are read again. */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Advertising and scan response data were cleared by the reset. */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller's white and resolving lists are empty now. */
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
226
hci_cc_read_stored_link_key(struct hci_dev * hdev,struct sk_buff * skb)227 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
228 struct sk_buff *skb)
229 {
230 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
231 struct hci_cp_read_stored_link_key *sent;
232
233 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
234
235 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
236 if (!sent)
237 return;
238
239 if (!rp->status && sent->read_all == 0x01) {
240 hdev->stored_max_keys = rp->max_keys;
241 hdev->stored_num_keys = rp->num_keys;
242 }
243 }
244
hci_cc_delete_stored_link_key(struct hci_dev * hdev,struct sk_buff * skb)245 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
246 struct sk_buff *skb)
247 {
248 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
249
250 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
251
252 if (rp->status)
253 return;
254
255 if (rp->num_keys <= hdev->stored_num_keys)
256 hdev->stored_num_keys -= rp->num_keys;
257 else
258 hdev->stored_num_keys = 0;
259 }
260
hci_cc_write_local_name(struct hci_dev * hdev,struct sk_buff * skb)261 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
262 {
263 __u8 status = *((__u8 *) skb->data);
264 void *sent;
265
266 BT_DBG("%s status 0x%2.2x", hdev->name, status);
267
268 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
269 if (!sent)
270 return;
271
272 hci_dev_lock(hdev);
273
274 if (hci_dev_test_flag(hdev, HCI_MGMT))
275 mgmt_set_local_name_complete(hdev, sent, status);
276 else if (!status)
277 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
278
279 hci_dev_unlock(hdev);
280 }
281
hci_cc_read_local_name(struct hci_dev * hdev,struct sk_buff * skb)282 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
283 {
284 struct hci_rp_read_local_name *rp = (void *) skb->data;
285
286 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
287
288 if (rp->status)
289 return;
290
291 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
292 hci_dev_test_flag(hdev, HCI_CONFIG))
293 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
294 }
295
hci_cc_write_auth_enable(struct hci_dev * hdev,struct sk_buff * skb)296 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
297 {
298 __u8 status = *((__u8 *) skb->data);
299 void *sent;
300
301 BT_DBG("%s status 0x%2.2x", hdev->name, status);
302
303 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
304 if (!sent)
305 return;
306
307 hci_dev_lock(hdev);
308
309 if (!status) {
310 __u8 param = *((__u8 *) sent);
311
312 if (param == AUTH_ENABLED)
313 set_bit(HCI_AUTH, &hdev->flags);
314 else
315 clear_bit(HCI_AUTH, &hdev->flags);
316 }
317
318 if (hci_dev_test_flag(hdev, HCI_MGMT))
319 mgmt_auth_enable_complete(hdev, status);
320
321 hci_dev_unlock(hdev);
322 }
323
hci_cc_write_encrypt_mode(struct hci_dev * hdev,struct sk_buff * skb)324 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
325 {
326 __u8 status = *((__u8 *) skb->data);
327 __u8 param;
328 void *sent;
329
330 BT_DBG("%s status 0x%2.2x", hdev->name, status);
331
332 if (status)
333 return;
334
335 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
336 if (!sent)
337 return;
338
339 param = *((__u8 *) sent);
340
341 if (param)
342 set_bit(HCI_ENCRYPT, &hdev->flags);
343 else
344 clear_bit(HCI_ENCRYPT, &hdev->flags);
345 }
346
hci_cc_write_scan_enable(struct hci_dev * hdev,struct sk_buff * skb)347 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
348 {
349 __u8 status = *((__u8 *) skb->data);
350 __u8 param;
351 void *sent;
352
353 BT_DBG("%s status 0x%2.2x", hdev->name, status);
354
355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
356 if (!sent)
357 return;
358
359 param = *((__u8 *) sent);
360
361 hci_dev_lock(hdev);
362
363 if (status) {
364 hdev->discov_timeout = 0;
365 goto done;
366 }
367
368 if (param & SCAN_INQUIRY)
369 set_bit(HCI_ISCAN, &hdev->flags);
370 else
371 clear_bit(HCI_ISCAN, &hdev->flags);
372
373 if (param & SCAN_PAGE)
374 set_bit(HCI_PSCAN, &hdev->flags);
375 else
376 clear_bit(HCI_PSCAN, &hdev->flags);
377
378 done:
379 hci_dev_unlock(hdev);
380 }
381
hci_cc_read_class_of_dev(struct hci_dev * hdev,struct sk_buff * skb)382 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
383 {
384 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
385
386 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
387
388 if (rp->status)
389 return;
390
391 memcpy(hdev->dev_class, rp->dev_class, 3);
392
393 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
394 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
395 }
396
hci_cc_write_class_of_dev(struct hci_dev * hdev,struct sk_buff * skb)397 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
398 {
399 __u8 status = *((__u8 *) skb->data);
400 void *sent;
401
402 BT_DBG("%s status 0x%2.2x", hdev->name, status);
403
404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
405 if (!sent)
406 return;
407
408 hci_dev_lock(hdev);
409
410 if (status == 0)
411 memcpy(hdev->dev_class, sent, 3);
412
413 if (hci_dev_test_flag(hdev, HCI_MGMT))
414 mgmt_set_class_of_dev_complete(hdev, sent, status);
415
416 hci_dev_unlock(hdev);
417 }
418
hci_cc_read_voice_setting(struct hci_dev * hdev,struct sk_buff * skb)419 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
420 {
421 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
422 __u16 setting;
423
424 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
425
426 if (rp->status)
427 return;
428
429 setting = __le16_to_cpu(rp->voice_setting);
430
431 if (hdev->voice_setting == setting)
432 return;
433
434 hdev->voice_setting = setting;
435
436 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
437
438 if (hdev->notify)
439 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
440 }
441
hci_cc_write_voice_setting(struct hci_dev * hdev,struct sk_buff * skb)442 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
443 struct sk_buff *skb)
444 {
445 __u8 status = *((__u8 *) skb->data);
446 __u16 setting;
447 void *sent;
448
449 BT_DBG("%s status 0x%2.2x", hdev->name, status);
450
451 if (status)
452 return;
453
454 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
455 if (!sent)
456 return;
457
458 setting = get_unaligned_le16(sent);
459
460 if (hdev->voice_setting == setting)
461 return;
462
463 hdev->voice_setting = setting;
464
465 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
466
467 if (hdev->notify)
468 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
469 }
470
hci_cc_read_num_supported_iac(struct hci_dev * hdev,struct sk_buff * skb)471 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
472 struct sk_buff *skb)
473 {
474 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
475
476 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
477
478 if (rp->status)
479 return;
480
481 hdev->num_iac = rp->num_iac;
482
483 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
484 }
485
/* Command Complete for HCI_Write_Simple_Pairing_Mode.
 *
 * Keeps the cached host feature bit and the HCI_SSP_ENABLED flag in
 * sync with the mode that was sent to the controller, and notifies
 * the management interface when it is active.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* Mirror the host-support feature bit we just wrote. */
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		/* Without mgmt, track the enabled state directly. */
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
517
/* Command Complete for HCI_Write_Secure_Connections_Host_Support.
 *
 * Keeps the cached host feature bit and the HCI_SC_ENABLED flag in
 * sync with the support value that was sent to the controller. When
 * mgmt is active the flag is managed elsewhere and left untouched.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* Mirror the host-support feature bit we just wrote. */
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}
547
hci_cc_read_local_version(struct hci_dev * hdev,struct sk_buff * skb)548 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
549 {
550 struct hci_rp_read_local_version *rp = (void *) skb->data;
551
552 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
553
554 if (rp->status)
555 return;
556
557 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
558 hci_dev_test_flag(hdev, HCI_CONFIG)) {
559 hdev->hci_ver = rp->hci_ver;
560 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
561 hdev->lmp_ver = rp->lmp_ver;
562 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
563 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
564 }
565 }
566
hci_cc_read_local_commands(struct hci_dev * hdev,struct sk_buff * skb)567 static void hci_cc_read_local_commands(struct hci_dev *hdev,
568 struct sk_buff *skb)
569 {
570 struct hci_rp_read_local_commands *rp = (void *) skb->data;
571
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
573
574 if (rp->status)
575 return;
576
577 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
578 hci_dev_test_flag(hdev, HCI_CONFIG))
579 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
580 }
581
/* Command Complete for HCI_Read_Local_Supported_Features.
 *
 * Caches the feature bits and derives the supported ACL/SCO/eSCO
 * packet types from them.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* EV3 is available on any eSCO capable controller. */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
631
hci_cc_read_local_ext_features(struct hci_dev * hdev,struct sk_buff * skb)632 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
633 struct sk_buff *skb)
634 {
635 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
636
637 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
638
639 if (rp->status)
640 return;
641
642 if (hdev->max_page < rp->max_page)
643 hdev->max_page = rp->max_page;
644
645 if (rp->page < HCI_MAX_PAGES)
646 memcpy(hdev->features[rp->page], rp->features, 8);
647 }
648
hci_cc_read_flow_control_mode(struct hci_dev * hdev,struct sk_buff * skb)649 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
650 struct sk_buff *skb)
651 {
652 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
653
654 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
655
656 if (rp->status)
657 return;
658
659 hdev->flow_ctl_mode = rp->mode;
660 }
661
hci_cc_read_buffer_size(struct hci_dev * hdev,struct sk_buff * skb)662 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
663 {
664 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
665
666 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
667
668 if (rp->status)
669 return;
670
671 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
672 hdev->sco_mtu = rp->sco_mtu;
673 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
674 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
675
676 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
677 hdev->sco_mtu = 64;
678 hdev->sco_pkts = 8;
679 }
680
681 hdev->acl_cnt = hdev->acl_pkts;
682 hdev->sco_cnt = hdev->sco_pkts;
683
684 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
685 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
686 }
687
hci_cc_read_bd_addr(struct hci_dev * hdev,struct sk_buff * skb)688 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
689 {
690 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
691
692 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
693
694 if (rp->status)
695 return;
696
697 if (test_bit(HCI_INIT, &hdev->flags))
698 bacpy(&hdev->bdaddr, &rp->bdaddr);
699
700 if (hci_dev_test_flag(hdev, HCI_SETUP))
701 bacpy(&hdev->setup_addr, &rp->bdaddr);
702 }
703
hci_cc_read_page_scan_activity(struct hci_dev * hdev,struct sk_buff * skb)704 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
705 struct sk_buff *skb)
706 {
707 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
708
709 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
710
711 if (rp->status)
712 return;
713
714 if (test_bit(HCI_INIT, &hdev->flags)) {
715 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
716 hdev->page_scan_window = __le16_to_cpu(rp->window);
717 }
718 }
719
hci_cc_write_page_scan_activity(struct hci_dev * hdev,struct sk_buff * skb)720 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
721 struct sk_buff *skb)
722 {
723 u8 status = *((u8 *) skb->data);
724 struct hci_cp_write_page_scan_activity *sent;
725
726 BT_DBG("%s status 0x%2.2x", hdev->name, status);
727
728 if (status)
729 return;
730
731 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
732 if (!sent)
733 return;
734
735 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
736 hdev->page_scan_window = __le16_to_cpu(sent->window);
737 }
738
hci_cc_read_page_scan_type(struct hci_dev * hdev,struct sk_buff * skb)739 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
740 struct sk_buff *skb)
741 {
742 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
743
744 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
745
746 if (rp->status)
747 return;
748
749 if (test_bit(HCI_INIT, &hdev->flags))
750 hdev->page_scan_type = rp->type;
751 }
752
hci_cc_write_page_scan_type(struct hci_dev * hdev,struct sk_buff * skb)753 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
754 struct sk_buff *skb)
755 {
756 u8 status = *((u8 *) skb->data);
757 u8 *type;
758
759 BT_DBG("%s status 0x%2.2x", hdev->name, status);
760
761 if (status)
762 return;
763
764 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
765 if (type)
766 hdev->page_scan_type = *type;
767 }
768
hci_cc_read_data_block_size(struct hci_dev * hdev,struct sk_buff * skb)769 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
770 struct sk_buff *skb)
771 {
772 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
773
774 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
775
776 if (rp->status)
777 return;
778
779 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
780 hdev->block_len = __le16_to_cpu(rp->block_len);
781 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
782
783 hdev->block_cnt = hdev->num_blocks;
784
785 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
786 hdev->block_cnt, hdev->block_len);
787 }
788
/* Command Complete for HCI_Read_Clock.
 *
 * Depending on the "which" parameter of the original command the
 * result is either the local clock (stored on hdev) or a piconet
 * clock (stored on the matching connection).
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated event before touching the fields. */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		/* 0x00 requested the local Bluetooth clock. */
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	/* Otherwise the clock belongs to a specific connection. */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
823
hci_cc_read_local_amp_info(struct hci_dev * hdev,struct sk_buff * skb)824 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
825 struct sk_buff *skb)
826 {
827 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
828
829 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
830
831 if (rp->status)
832 return;
833
834 hdev->amp_status = rp->amp_status;
835 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
836 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
837 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
838 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
839 hdev->amp_type = rp->amp_type;
840 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
841 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
842 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
843 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
844 }
845
hci_cc_read_inq_rsp_tx_power(struct hci_dev * hdev,struct sk_buff * skb)846 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
847 struct sk_buff *skb)
848 {
849 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
850
851 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
852
853 if (rp->status)
854 return;
855
856 hdev->inq_tx_power = rp->tx_power;
857 }
858
/* Command Complete for HCI_PIN_Code_Request_Reply.
 *
 * Notifies mgmt of the result and, on success, records the PIN length
 * on the matching connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
886
hci_cc_pin_code_neg_reply(struct hci_dev * hdev,struct sk_buff * skb)887 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
888 {
889 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
890
891 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
892
893 hci_dev_lock(hdev);
894
895 if (hci_dev_test_flag(hdev, HCI_MGMT))
896 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
897 rp->status);
898
899 hci_dev_unlock(hdev);
900 }
901
hci_cc_le_read_buffer_size(struct hci_dev * hdev,struct sk_buff * skb)902 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
903 struct sk_buff *skb)
904 {
905 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
906
907 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
908
909 if (rp->status)
910 return;
911
912 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
913 hdev->le_pkts = rp->le_max_pkt;
914
915 hdev->le_cnt = hdev->le_pkts;
916
917 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
918 }
919
hci_cc_le_read_local_features(struct hci_dev * hdev,struct sk_buff * skb)920 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
921 struct sk_buff *skb)
922 {
923 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
924
925 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
926
927 if (rp->status)
928 return;
929
930 memcpy(hdev->le_features, rp->features, 8);
931 }
932
hci_cc_le_read_adv_tx_power(struct hci_dev * hdev,struct sk_buff * skb)933 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
934 struct sk_buff *skb)
935 {
936 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
937
938 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
939
940 if (rp->status)
941 return;
942
943 hdev->adv_tx_power = rp->tx_power;
944 }
945
hci_cc_user_confirm_reply(struct hci_dev * hdev,struct sk_buff * skb)946 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
947 {
948 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
949
950 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
951
952 hci_dev_lock(hdev);
953
954 if (hci_dev_test_flag(hdev, HCI_MGMT))
955 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
956 rp->status);
957
958 hci_dev_unlock(hdev);
959 }
960
hci_cc_user_confirm_neg_reply(struct hci_dev * hdev,struct sk_buff * skb)961 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
962 struct sk_buff *skb)
963 {
964 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
965
966 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
967
968 hci_dev_lock(hdev);
969
970 if (hci_dev_test_flag(hdev, HCI_MGMT))
971 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
972 ACL_LINK, 0, rp->status);
973
974 hci_dev_unlock(hdev);
975 }
976
hci_cc_user_passkey_reply(struct hci_dev * hdev,struct sk_buff * skb)977 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
978 {
979 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
980
981 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
982
983 hci_dev_lock(hdev);
984
985 if (hci_dev_test_flag(hdev, HCI_MGMT))
986 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
987 0, rp->status);
988
989 hci_dev_unlock(hdev);
990 }
991
hci_cc_user_passkey_neg_reply(struct hci_dev * hdev,struct sk_buff * skb)992 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
993 struct sk_buff *skb)
994 {
995 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
996
997 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
998
999 hci_dev_lock(hdev);
1000
1001 if (hci_dev_test_flag(hdev, HCI_MGMT))
1002 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1003 ACL_LINK, 0, rp->status);
1004
1005 hci_dev_unlock(hdev);
1006 }
1007
/* Command Complete for HCI_Read_Local_OOB_Data.
 * Only the status is logged here; no state is updated.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1015
/* Command Complete for HCI_Read_Local_OOB_Extended_Data.
 * Only the status is logged here; no state is updated.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1023
hci_cc_le_set_random_addr(struct hci_dev * hdev,struct sk_buff * skb)1024 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1025 {
1026 __u8 status = *((__u8 *) skb->data);
1027 bdaddr_t *sent;
1028
1029 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1030
1031 if (status)
1032 return;
1033
1034 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1035 if (!sent)
1036 return;
1037
1038 hci_dev_lock(hdev);
1039
1040 bacpy(&hdev->random_addr, sent);
1041
1042 hci_dev_unlock(hdev);
1043 }
1044
hci_cc_le_set_default_phy(struct hci_dev * hdev,struct sk_buff * skb)1045 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1046 {
1047 __u8 status = *((__u8 *) skb->data);
1048 struct hci_cp_le_set_default_phy *cp;
1049
1050 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1051
1052 if (status)
1053 return;
1054
1055 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1056 if (!cp)
1057 return;
1058
1059 hci_dev_lock(hdev);
1060
1061 hdev->le_tx_def_phys = cp->tx_phys;
1062 hdev->le_rx_def_phys = cp->rx_phys;
1063
1064 hci_dev_unlock(hdev);
1065 }
1066
hci_cc_le_set_adv_set_random_addr(struct hci_dev * hdev,struct sk_buff * skb)1067 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1068 struct sk_buff *skb)
1069 {
1070 __u8 status = *((__u8 *) skb->data);
1071 struct hci_cp_le_set_adv_set_rand_addr *cp;
1072 struct adv_info *adv_instance;
1073
1074 if (status)
1075 return;
1076
1077 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1078 if (!cp)
1079 return;
1080
1081 hci_dev_lock(hdev);
1082
1083 if (!hdev->cur_adv_instance) {
1084 /* Store in hdev for instance 0 (Set adv and Directed advs) */
1085 bacpy(&hdev->random_addr, &cp->bdaddr);
1086 } else {
1087 adv_instance = hci_find_adv_instance(hdev,
1088 hdev->cur_adv_instance);
1089 if (adv_instance)
1090 bacpy(&adv_instance->random_addr, &cp->bdaddr);
1091 }
1092
1093 hci_dev_unlock(hdev);
1094 }
1095
/* Command Complete: HCI_LE_Set_Advertising_Enable.
 * Mirrors the controller's legacy advertising state into the HCI_LE_ADV
 * flag and arms a connect timeout when advertising was enabled for an
 * outgoing-as-peripheral connection attempt.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the enable parameter of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1130
/* Command Complete: HCI_LE_Set_Extended_Advertising_Enable.
 * Extended-advertising counterpart of hci_cc_le_set_adv_enable():
 * tracks HCI_LE_ADV and arms the LE connect timeout when enabling.
 */
static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		/* Peripheral-initiated connect: bound the attempt in time */
		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1164
hci_cc_le_set_scan_param(struct hci_dev * hdev,struct sk_buff * skb)1165 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1166 {
1167 struct hci_cp_le_set_scan_param *cp;
1168 __u8 status = *((__u8 *) skb->data);
1169
1170 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1171
1172 if (status)
1173 return;
1174
1175 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1176 if (!cp)
1177 return;
1178
1179 hci_dev_lock(hdev);
1180
1181 hdev->le_scan_type = cp->type;
1182
1183 hci_dev_unlock(hdev);
1184 }
1185
/* Command Complete: HCI_LE_Set_Extended_Scan_Parameters.
 * On success, cache the scan type from the first per-PHY parameter
 * block that followed the fixed command header.
 */
static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	/* cp->data holds one hci_cp_le_scan_phy_params per scanning PHY;
	 * only the first entry's type is recorded here.
	 */
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}
1210
has_pending_adv_report(struct hci_dev * hdev)1211 static bool has_pending_adv_report(struct hci_dev *hdev)
1212 {
1213 struct discovery_state *d = &hdev->discovery;
1214
1215 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1216 }
1217
clear_pending_adv_report(struct hci_dev * hdev)1218 static void clear_pending_adv_report(struct hci_dev *hdev)
1219 {
1220 struct discovery_state *d = &hdev->discovery;
1221
1222 bacpy(&d->last_adv_addr, BDADDR_ANY);
1223 d->last_adv_data_len = 0;
1224 }
1225
/* Cache the most recent advertising report so it can be reported to
 * userspace (possibly merged with a matching scan response) later.
 *
 * Fix: @len comes from the controller and is not validated by the
 * caller; as a u8 it can be up to 255 while d->last_adv_data is a
 * fixed-size buffer. Clamp it so the memcpy below cannot overflow the
 * discovery state (a controller sending an oversized report would
 * otherwise corrupt adjacent memory).
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > sizeof(d->last_adv_data))
		len = sizeof(d->last_adv_data);

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
1239
/* Common completion handling for (extended and legacy) LE scan
 * enable/disable: updates HCI_LE_SCAN, flushes any cached advertising
 * report on disable, and reconciles the discovery state machine.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scans merge ADV_IND with SCAN_RSP; start with an
		 * empty pending-report cache.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			/* Flush the last report even though its scan
			 * response never arrived.
			 */
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1296
hci_cc_le_set_scan_enable(struct hci_dev * hdev,struct sk_buff * skb)1297 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1298 struct sk_buff *skb)
1299 {
1300 struct hci_cp_le_set_scan_enable *cp;
1301 __u8 status = *((__u8 *) skb->data);
1302
1303 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1304
1305 if (status)
1306 return;
1307
1308 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1309 if (!cp)
1310 return;
1311
1312 le_set_scan_enable_complete(hdev, cp->enable);
1313 }
1314
hci_cc_le_set_ext_scan_enable(struct hci_dev * hdev,struct sk_buff * skb)1315 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1316 struct sk_buff *skb)
1317 {
1318 struct hci_cp_le_set_ext_scan_enable *cp;
1319 __u8 status = *((__u8 *) skb->data);
1320
1321 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1322
1323 if (status)
1324 return;
1325
1326 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1327 if (!cp)
1328 return;
1329
1330 le_set_scan_enable_complete(hdev, cp->enable);
1331 }
1332
hci_cc_le_read_num_adv_sets(struct hci_dev * hdev,struct sk_buff * skb)1333 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1334 struct sk_buff *skb)
1335 {
1336 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1337
1338 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1339 rp->num_of_sets);
1340
1341 if (rp->status)
1342 return;
1343
1344 hdev->le_num_of_adv_sets = rp->num_of_sets;
1345 }
1346
hci_cc_le_read_white_list_size(struct hci_dev * hdev,struct sk_buff * skb)1347 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1348 struct sk_buff *skb)
1349 {
1350 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1351
1352 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1353
1354 if (rp->status)
1355 return;
1356
1357 hdev->le_white_list_size = rp->size;
1358 }
1359
hci_cc_le_clear_white_list(struct hci_dev * hdev,struct sk_buff * skb)1360 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1361 struct sk_buff *skb)
1362 {
1363 __u8 status = *((__u8 *) skb->data);
1364
1365 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1366
1367 if (status)
1368 return;
1369
1370 hci_bdaddr_list_clear(&hdev->le_white_list);
1371 }
1372
hci_cc_le_add_to_white_list(struct hci_dev * hdev,struct sk_buff * skb)1373 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1374 struct sk_buff *skb)
1375 {
1376 struct hci_cp_le_add_to_white_list *sent;
1377 __u8 status = *((__u8 *) skb->data);
1378
1379 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1380
1381 if (status)
1382 return;
1383
1384 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1385 if (!sent)
1386 return;
1387
1388 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1389 sent->bdaddr_type);
1390 }
1391
hci_cc_le_del_from_white_list(struct hci_dev * hdev,struct sk_buff * skb)1392 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1393 struct sk_buff *skb)
1394 {
1395 struct hci_cp_le_del_from_white_list *sent;
1396 __u8 status = *((__u8 *) skb->data);
1397
1398 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1399
1400 if (status)
1401 return;
1402
1403 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1404 if (!sent)
1405 return;
1406
1407 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1408 sent->bdaddr_type);
1409 }
1410
hci_cc_le_read_supported_states(struct hci_dev * hdev,struct sk_buff * skb)1411 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1412 struct sk_buff *skb)
1413 {
1414 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1415
1416 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1417
1418 if (rp->status)
1419 return;
1420
1421 memcpy(hdev->le_states, rp->le_states, 8);
1422 }
1423
hci_cc_le_read_def_data_len(struct hci_dev * hdev,struct sk_buff * skb)1424 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1425 struct sk_buff *skb)
1426 {
1427 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1428
1429 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1430
1431 if (rp->status)
1432 return;
1433
1434 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1435 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1436 }
1437
hci_cc_le_write_def_data_len(struct hci_dev * hdev,struct sk_buff * skb)1438 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1439 struct sk_buff *skb)
1440 {
1441 struct hci_cp_le_write_def_data_len *sent;
1442 __u8 status = *((__u8 *) skb->data);
1443
1444 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1445
1446 if (status)
1447 return;
1448
1449 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1450 if (!sent)
1451 return;
1452
1453 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1454 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1455 }
1456
hci_cc_le_clear_resolv_list(struct hci_dev * hdev,struct sk_buff * skb)1457 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1458 struct sk_buff *skb)
1459 {
1460 __u8 status = *((__u8 *) skb->data);
1461
1462 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1463
1464 if (status)
1465 return;
1466
1467 hci_bdaddr_list_clear(&hdev->le_resolv_list);
1468 }
1469
hci_cc_le_read_resolv_list_size(struct hci_dev * hdev,struct sk_buff * skb)1470 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1471 struct sk_buff *skb)
1472 {
1473 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1474
1475 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1476
1477 if (rp->status)
1478 return;
1479
1480 hdev->le_resolv_list_size = rp->size;
1481 }
1482
hci_cc_le_set_addr_resolution_enable(struct hci_dev * hdev,struct sk_buff * skb)1483 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1484 struct sk_buff *skb)
1485 {
1486 __u8 *sent, status = *((__u8 *) skb->data);
1487
1488 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1489
1490 if (status)
1491 return;
1492
1493 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1494 if (!sent)
1495 return;
1496
1497 hci_dev_lock(hdev);
1498
1499 if (*sent)
1500 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1501 else
1502 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1503
1504 hci_dev_unlock(hdev);
1505 }
1506
hci_cc_le_read_max_data_len(struct hci_dev * hdev,struct sk_buff * skb)1507 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1508 struct sk_buff *skb)
1509 {
1510 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1511
1512 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1513
1514 if (rp->status)
1515 return;
1516
1517 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1518 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1519 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1520 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1521 }
1522
/* Command Complete: HCI_Write_LE_Host_Supported.
 * Mirrors the written LE/simultaneous-LE-BREDR host feature bits into
 * hdev->features[1][0] and the HCI_LE_ENABLED/HCI_ADVERTISING flags.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		/* Disabling LE host support also invalidates advertising */
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1556
hci_cc_set_adv_param(struct hci_dev * hdev,struct sk_buff * skb)1557 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1558 {
1559 struct hci_cp_le_set_adv_param *cp;
1560 u8 status = *((u8 *) skb->data);
1561
1562 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1563
1564 if (status)
1565 return;
1566
1567 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1568 if (!cp)
1569 return;
1570
1571 hci_dev_lock(hdev);
1572 hdev->adv_addr_type = cp->own_address_type;
1573 hci_dev_unlock(hdev);
1574 }
1575
/* Command Complete: HCI_LE_Set_Extended_Advertising_Parameters.
 * Records the own-address type and the controller-selected TX power
 * (per advertising instance), then refreshes the advertising data
 * which may embed that TX power.
 */
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
	hci_dev_unlock(hdev);
}
1606
hci_cc_read_rssi(struct hci_dev * hdev,struct sk_buff * skb)1607 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1608 {
1609 struct hci_rp_read_rssi *rp = (void *) skb->data;
1610 struct hci_conn *conn;
1611
1612 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1613
1614 if (rp->status)
1615 return;
1616
1617 hci_dev_lock(hdev);
1618
1619 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1620 if (conn)
1621 conn->rssi = rp->rssi;
1622
1623 hci_dev_unlock(hdev);
1624 }
1625
hci_cc_read_tx_power(struct hci_dev * hdev,struct sk_buff * skb)1626 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1627 {
1628 struct hci_cp_read_tx_power *sent;
1629 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1630 struct hci_conn *conn;
1631
1632 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1633
1634 if (rp->status)
1635 return;
1636
1637 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1638 if (!sent)
1639 return;
1640
1641 hci_dev_lock(hdev);
1642
1643 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1644 if (!conn)
1645 goto unlock;
1646
1647 switch (sent->type) {
1648 case 0x00:
1649 conn->tx_power = rp->tx_power;
1650 break;
1651 case 0x01:
1652 conn->max_tx_power = rp->tx_power;
1653 break;
1654 }
1655
1656 unlock:
1657 hci_dev_unlock(hdev);
1658 }
1659
hci_cc_write_ssp_debug_mode(struct hci_dev * hdev,struct sk_buff * skb)1660 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1661 {
1662 u8 status = *((u8 *) skb->data);
1663 u8 *mode;
1664
1665 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1666
1667 if (status)
1668 return;
1669
1670 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1671 if (mode)
1672 hdev->ssp_debug_mode = *mode;
1673 }
1674
/* Command Status: HCI_Inquiry.
 * On success mark the adapter as inquiring; on failure let any queued
 * connection attempts proceed instead.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status) {
		set_bit(HCI_INQUIRY, &hdev->flags);
		return;
	}

	hci_conn_check_pending(hdev);
}
1686
/* Command Status: HCI_Create_Connection.
 * On failure, either retry (by parking the conn in BT_CONNECT2) or
 * tear the connection object down; on success make sure a conn object
 * exists for the pending link.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* NOTE(review): 0x0c (Command Disallowed) is treated
			 * as transient and retried up to two attempts;
			 * anything else fails the connection immediately.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		/* Command accepted: ensure a conn object tracks the link */
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1724
/* Command Status: HCI_Add_SCO_Connection.
 * Only failures matter here: tear down the SCO conn hanging off the
 * ACL link the command referenced. (Success is handled by the later
 * connection-complete event.)
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* cp->handle is the ACL handle the SCO link is being added on */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1759
/* Command Status: HCI_Authentication_Requested.
 * On failure during connection setup (BT_CONFIG), report the failure
 * to upper layers and drop the connection reference.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
1786
/* Command Status: HCI_Set_Connection_Encryption.
 * On failure during connection setup (BT_CONFIG), report the failure
 * to upper layers and drop the connection reference.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
1813
/* Decide whether an outgoing connection still in setup needs an
 * explicit HCI_Authentication_Requested before it can be completed.
 * Returns 1 when authentication should be requested, 0 otherwise.
 */
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	/* Only outgoing connections that are still being configured */
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	/* SDP-only access requires no authentication at all */
	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}
1835
hci_resolve_name(struct hci_dev * hdev,struct inquiry_entry * e)1836 static int hci_resolve_name(struct hci_dev *hdev,
1837 struct inquiry_entry *e)
1838 {
1839 struct hci_cp_remote_name_req cp;
1840
1841 memset(&cp, 0, sizeof(cp));
1842
1843 bacpy(&cp.bdaddr, &e->data.bdaddr);
1844 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1845 cp.pscan_mode = e->data.pscan_mode;
1846 cp.clock_offset = e->data.clock_offset;
1847
1848 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1849 }
1850
hci_resolve_next_name(struct hci_dev * hdev)1851 static bool hci_resolve_next_name(struct hci_dev *hdev)
1852 {
1853 struct discovery_state *discov = &hdev->discovery;
1854 struct inquiry_entry *e;
1855
1856 if (list_empty(&discov->resolve))
1857 return false;
1858
1859 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1860 if (!e)
1861 return false;
1862
1863 if (hci_resolve_name(hdev, e) == 0) {
1864 e->name_state = NAME_PENDING;
1865 return true;
1866 }
1867
1868 return false;
1869 }
1870
/* Process a completed (or failed) remote name lookup for @bdaddr:
 * report the device as connected to mgmt if appropriate, record the
 * name result in the inquiry cache, and advance or finish the
 * name-resolving phase of discovery. @name is NULL when the lookup
 * failed; @conn may be NULL.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		/* Lookup failed; leave the entry marked as unresolvable */
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names to resolve? Keep discovery in RESOLVING. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1919
/* Command Status: HCI_Remote_Name_Request.
 * Only a failure status is handled here: clean up the pending name
 * lookup and, for an outgoing connection still being set up, start
 * authentication now since no name-complete event will arrive.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed lookup (name == NULL) to the mgmt layer */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1962
/* Command Status: HCI_Read_Remote_Supported_Features.
 * On failure during connection setup (BT_CONFIG), report the failure
 * to upper layers and drop the connection reference.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
1989
/* Command Status: HCI_Read_Remote_Extended_Features.
 * On failure during connection setup (BT_CONFIG), report the failure
 * to upper layers and drop the connection reference.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
2016
/* Command Status: HCI_Setup_Synchronous_Connection.
 * Only failures matter here: tear down the SCO/eSCO conn hanging off
 * the ACL link the command referenced (mirrors hci_cs_add_sco()).
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	/* cp->handle is the ACL handle the sync link is being set up on */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
2051
/* Command Status: HCI_Sniff_Mode.
 * On failure, clear the pending mode-change flag and finish any SCO
 * setup that was waiting on the mode change.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
2078
/* Command Status: HCI_Exit_Sniff_Mode.
 * On failure, clear the pending mode-change flag and finish any SCO
 * setup that was waiting on the mode change.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
2105
/* Command Status: HCI_Disconnect.
 * Only failures matter: notify mgmt that the disconnect request was
 * rejected so userspace is not left waiting for a disconnect event.
 *
 * Consistency fix: log the status like every other command-status
 * handler in this file does.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn)
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

	hci_dev_unlock(hdev);
}
2127
/* Shared accepted-command handling for LE (extended) create connection:
 * record the initiator/responder addresses needed by SMP and, for
 * direct (non white-list) connects, arm the LE connection timeout.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}
2162
/* Command Status: HCI_LE_Create_Connection.
 * Failures are handled elsewhere (hci_le_conn_failed via the request
 * completion callbacks); on acceptance, hand the sent parameters to
 * the shared cs_le_create_conn() bookkeeping.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *params;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	params = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!params)
		return;

	hci_dev_lock(hdev);
	cs_le_create_conn(hdev, &params->peer_addr, params->peer_addr_type,
			  params->own_address_type, params->filter_policy);
	hci_dev_unlock(hdev);
}
2187
/* Command Status: HCI_LE_Extended_Create_Connection.
 * Extended counterpart of hci_cs_le_create_conn(); failures are
 * handled by hci_le_conn_failed via the request completion callbacks.
 */
static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_ext_create_conn *params;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	params = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
	if (!params)
		return;

	hci_dev_lock(hdev);
	cs_le_create_conn(hdev, &params->peer_addr, params->peer_addr_type,
			  params->own_addr_type, params->filter_policy);
	hci_dev_unlock(hdev);
}
2212
/* Handle command status for HCI_OP_LE_READ_REMOTE_FEATURES.
 *
 * A successful status means the LE Read Remote Features Complete event
 * will follow, so nothing is done here. On failure, a connection that
 * is still in BT_CONFIG is notified and its reference dropped.
 */
static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *sent;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(sent->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
2239
/* Handle command status for HCI_OP_LE_START_ENC.
 *
 * Only failures matter here: if the controller refused to start
 * encryption on a connection that is still BT_CONNECTED, the link can
 * no longer be trusted and is torn down with an authentication
 * failure.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *sent;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!sent)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(sent->handle));
	if (conn && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2269
/* Handle command status for HCI_OP_SWITCH_ROLE.
 *
 * A successful status means the role switch is in progress and the
 * Role Change event will follow. On failure the pending role-switch
 * flag on the matching ACL connection is cleared.
 */
static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *sent;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &sent->bdaddr);
	if (conn)
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}
2292
hci_inquiry_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)2293 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2294 {
2295 __u8 status = *((__u8 *) skb->data);
2296 struct discovery_state *discov = &hdev->discovery;
2297 struct inquiry_entry *e;
2298
2299 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2300
2301 hci_conn_check_pending(hdev);
2302
2303 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2304 return;
2305
2306 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2307 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2308
2309 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2310 return;
2311
2312 hci_dev_lock(hdev);
2313
2314 if (discov->state != DISCOVERY_FINDING)
2315 goto unlock;
2316
2317 if (list_empty(&discov->resolve)) {
2318 /* When BR/EDR inquiry is active and no LE scanning is in
2319 * progress, then change discovery state to indicate completion.
2320 *
2321 * When running LE scanning and BR/EDR inquiry simultaneously
2322 * and the LE scan already finished, then change the discovery
2323 * state to indicate completion.
2324 */
2325 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2326 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2327 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2328 goto unlock;
2329 }
2330
2331 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2332 if (e && hci_resolve_name(hdev, e) == 0) {
2333 e->name_state = NAME_PENDING;
2334 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2335 } else {
2336 /* When BR/EDR inquiry is active and no LE scanning is in
2337 * progress, then change discovery state to indicate completion.
2338 *
2339 * When running LE scanning and BR/EDR inquiry simultaneously
2340 * and the LE scan already finished, then change the discovery
2341 * state to indicate completion.
2342 */
2343 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2344 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2345 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2346 }
2347
2348 unlock:
2349 hci_dev_unlock(hdev);
2350 }
2351
hci_inquiry_result_evt(struct hci_dev * hdev,struct sk_buff * skb)2352 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2353 {
2354 struct inquiry_data data;
2355 struct inquiry_info *info = (void *) (skb->data + 1);
2356 int num_rsp = *((__u8 *) skb->data);
2357
2358 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2359
2360 if (!num_rsp)
2361 return;
2362
2363 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2364 return;
2365
2366 hci_dev_lock(hdev);
2367
2368 for (; num_rsp; num_rsp--, info++) {
2369 u32 flags;
2370
2371 bacpy(&data.bdaddr, &info->bdaddr);
2372 data.pscan_rep_mode = info->pscan_rep_mode;
2373 data.pscan_period_mode = info->pscan_period_mode;
2374 data.pscan_mode = info->pscan_mode;
2375 memcpy(data.dev_class, info->dev_class, 3);
2376 data.clock_offset = info->clock_offset;
2377 data.rssi = HCI_RSSI_INVALID;
2378 data.ssp_mode = 0x00;
2379
2380 flags = hci_inquiry_cache_update(hdev, &data, false);
2381
2382 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2383 info->dev_class, HCI_RSSI_INVALID,
2384 flags, NULL, 0, NULL, 0);
2385 }
2386
2387 hci_dev_unlock(hdev);
2388 }
2389
/* Handle HCI Connection Complete event (BR/EDR ACL, SCO or eSCO).
 *
 * Matches the event to a pending connection object by address. On
 * success the connection handle and state are set up (ACL links move
 * to BT_CONFIG and trigger remote feature discovery); on failure the
 * connection is torn down and, for ACL, mgmt is informed.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A connection requested as eSCO may complete as SCO;
		 * retry the lookup as eSCO and fix up the link type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) connections without a
			 * stored link key are about to pair; give them the
			 * longer pairing disconnect timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		/* Inherit the adapter-wide auth/encrypt settings. */
		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* Successful ACL links are confirmed later, after remote
		 * feature discovery (see hci_remote_features_evt).
		 */
		hci_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	/* Kick any connection attempts that were held back while this
	 * one was in progress.
	 */
	hci_conn_check_pending(hdev);
}
2474
/* Reject an incoming connection request from @bdaddr with the
 * "unacceptable BD_ADDR" error code.
 */
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_cp_reject_conn_req cp = {
		.reason = HCI_ERROR_REJ_BAD_ADDR,
	};

	bacpy(&cp.bdaddr, bdaddr);
	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
2483
/* Handle HCI Connection Request event (incoming BR/EDR connection).
 *
 * Rejects the request if the protocol layers, the blacklist, or the
 * mgmt connectable/whitelist policy disallow it. Otherwise creates or
 * reuses a connection object and either accepts the connection
 * immediately (ACL, or SCO/eSCO when not deferred) or defers the
 * decision to the protocol layer via hci_connect_cfm.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto or defer the connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class from the request. */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		/* Accept ACL immediately; SCO too when eSCO is not
		 * supported and the protocol did not ask to defer.
		 */
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* Accept as synchronous (eSCO-capable) connection. */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Protocol asked to defer: leave the decision to the
		 * upper layer via the connect callback.
		 */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2578
/* Map an HCI disconnect reason code to the corresponding mgmt
 * disconnect reason. Unrecognized codes map to
 * MGMT_DEV_DISCONN_UNKNOWN.
 */
static u8 hci_to_mgmt_reason(u8 err)
{
	if (err == HCI_ERROR_CONNECTION_TIMEOUT)
		return MGMT_DEV_DISCONN_TIMEOUT;

	if (err == HCI_ERROR_REMOTE_USER_TERM ||
	    err == HCI_ERROR_REMOTE_LOW_RESOURCES ||
	    err == HCI_ERROR_REMOTE_POWER_OFF)
		return MGMT_DEV_DISCONN_REMOTE;

	if (err == HCI_ERROR_LOCAL_HOST_TERM)
		return MGMT_DEV_DISCONN_LOCAL_HOST;

	return MGMT_DEV_DISCONN_UNKNOWN;
}
2594
/* Handle HCI Disconnection Complete event.
 *
 * Notifies mgmt of the disconnect, removes flush-marked link keys,
 * re-arms auto-connect parameters for LE devices that should be
 * reconnected, tears down the connection object and re-enables
 * advertising where applicable.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* An earlier auth failure overrides the HCI-reported reason. */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	/* Re-queue devices configured for automatic reconnection onto
	 * the pending LE connections list and restart passive scanning.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect if the link was actually lost. */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Remember the link type before conn is freed below. */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		hci_req_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2678
/* Handle HCI Authentication Complete event.
 *
 * Updates the connection's auth/security flags, informs mgmt of
 * failures, and — depending on the connection state — either proceeds
 * with encryption setup (SSP in BT_CONFIG, or a pending encrypt
 * request) or completes the connection/auth callbacks.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Legacy (non-SSP) devices cannot be re-authenticated;
		 * don't raise the security level for them on re-auth.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		/* Remember key-missing failures so the eventual
		 * disconnect can be reported as an auth failure.
		 */
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP links must be encrypted before use; request
			 * encryption before declaring the link connected.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Service a pending encryption request that was waiting for
	 * authentication to finish.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2748
/* Handle HCI Remote Name Request Complete event.
 *
 * Feeds the resolved (or failed) name into the mgmt pending-name
 * machinery and, for an existing connection, kicks off outgoing
 * authentication if it is still needed.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	/* Note: conn may legitimately be NULL here; the check_auth path
	 * below handles that.
	 */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2790
/* Completion callback for HCI_OP_READ_ENC_KEY_SIZE (issued from
 * hci_encrypt_change_evt). Records the negotiated encryption key size
 * on the connection and then delivers the deferred connect/encrypt
 * confirmation that was held back while the key size was being read.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* If we fail to read the encryption key size, assume maximum
	 * (which is the same we do also when this HCI command isn't
	 * supported).
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = HCI_LINK_KEY_SIZE;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	if (conn->state == BT_CONFIG) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, 0);
		hci_conn_drop(conn);
	} else {
		/* Connection already established: report the (now fully
		 * known) encryption state instead.
		 */
		u8 encrypt;

		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			encrypt = 0x00;
		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
			encrypt = 0x02;
		else
			encrypt = 0x01;

		hci_encrypt_cfm(conn, 0, encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2846
/* Handle HCI Encryption Change event.
 *
 * Updates the connection's auth/encrypt/AES-CCM flags, enforces Secure
 * Connections Only policy, tears down the link on encryption failure,
 * and for encrypted ACL links reads the encryption key size before
 * delivering the connect/encrypt confirmation.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Encryption failed on an established link: disconnect it. */
	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* In Secure Connections Only mode, do not allow any connections
	 * that are not encrypted with AES-CCM using a P-256 authenticated
	 * combination key.
	 */
	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* The confirmation is deferred until the key size read
		 * completes (see read_enc_key_size_complete).
		 */
		goto unlock;
	}

notify:
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2952
hci_change_link_key_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)2953 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2954 struct sk_buff *skb)
2955 {
2956 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2957 struct hci_conn *conn;
2958
2959 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2960
2961 hci_dev_lock(hdev);
2962
2963 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2964 if (conn) {
2965 if (!ev->status)
2966 set_bit(HCI_CONN_SECURE, &conn->flags);
2967
2968 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2969
2970 hci_key_change_cfm(conn, ev->status);
2971 }
2972
2973 hci_dev_unlock(hdev);
2974 }
2975
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Stores the remote feature page 0, requests the extended feature page
 * when both sides support it, otherwise continues connection setup
 * with a remote name request (for mgmt) or directly completes the
 * connection if no outgoing authentication is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides support extended features: fetch page 1 first;
	 * setup continues in the ext-features event handler.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before reporting the device as
	 * connected to mgmt; otherwise report it connected now.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3024
/* Handle HCI Command Complete event.
 *
 * Extracts the completed opcode and status, dispatches to the matching
 * hci_cc_* handler, then performs the common bookkeeping: cancel the
 * command timeout, replenish the command credit, complete any pending
 * HCI request and kick the command queue. The opcode and status are
 * returned to the caller through @opcode/@status, together with the
 * request completion callbacks via @req_complete/@req_complete_skb.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	/* The command's status byte immediately follows the event header. */
	*status = skb->data[sizeof(*ev)];

	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_RESOLV_LIST:
		hci_cc_le_clear_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
		hci_cc_le_read_resolv_list_size(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
		hci_cc_le_set_addr_resolution_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
		hci_cc_le_set_ext_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
		hci_cc_le_set_ext_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_DEFAULT_PHY:
		hci_cc_le_set_default_phy(hdev, skb);
		break;

	case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
		hci_cc_le_read_num_adv_sets(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_PARAMS:
		hci_cc_set_ext_adv_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_ENABLE:
		hci_cc_le_set_ext_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
		hci_cc_le_set_adv_set_random_addr(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A real command completed; the command timeout no longer applies. */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Replenish the command credit, except while a reset is pending. */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	/* Send the next queued command if credit is available. */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3363
/* Handle the HCI Command Status event.
 *
 * Reports the answered opcode and status back to the caller through
 * @opcode and @status, dispatches to the matching hci_cs_* handler,
 * rearms command flow control and kicks the command queue if more
 * commands may be sent.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_LE_EXT_CREATE_CONN:
		hci_cs_le_ext_create_conn(hdev, ev->status);
		break;

	default:
		/* Command has no dedicated status handler */
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A real command was answered; stop the command timeout timer */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Controller advertises room for another command, unless a reset
	 * is in progress (which keeps the queue quiesced).
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	/* Push the next queued command if flow control allows it */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3470
hci_hardware_error_evt(struct hci_dev * hdev,struct sk_buff * skb)3471 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3472 {
3473 struct hci_ev_hardware_error *ev = (void *) skb->data;
3474
3475 hdev->hw_error_code = ev->code;
3476
3477 queue_work(hdev->req_workqueue, &hdev->error_reset);
3478 }
3479
hci_role_change_evt(struct hci_dev * hdev,struct sk_buff * skb)3480 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3481 {
3482 struct hci_ev_role_change *ev = (void *) skb->data;
3483 struct hci_conn *conn;
3484
3485 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3486
3487 hci_dev_lock(hdev);
3488
3489 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3490 if (conn) {
3491 if (!ev->status)
3492 conn->role = ev->role;
3493
3494 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3495
3496 hci_role_switch_cfm(conn, ev->status, ev->role);
3497 }
3498
3499 hci_dev_unlock(hdev);
3500 }
3501
/* Handle the Number Of Completed Packets event.
 *
 * The controller reports, per connection handle, how many outstanding
 * packets it has finished transmitting.  Return those credits to the
 * matching per-type counter (capped at the negotiated maximum) and
 * reschedule TX work so queued frames can be sent.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful in packet-based flow control */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* The handle count is controller-supplied: make sure the claimed
	 * entries actually fit inside the event payload.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		/* Connection may already be gone; just skip its credits */
		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL credits instead.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	/* Freed credits may unblock queued transmissions */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3568
/* Resolve a connection handle to its hci_conn, taking the device type
 * into account: primary controllers index connections directly, AMP
 * controllers index logical channels that point back at a connection.
 * Returns NULL when nothing matches or the device type is unknown.
 */
static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
						 __u16 handle)
{
	struct hci_chan *chan;

	if (hdev->dev_type == HCI_PRIMARY)
		return hci_conn_hash_lookup_handle(hdev, handle);

	if (hdev->dev_type == HCI_AMP) {
		chan = hci_chan_lookup_handle(hdev, handle);
		return chan ? chan->conn : NULL;
	}

	bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
	return NULL;
}
3589
/* Handle the Number Of Completed Data Blocks event (block-based flow
 * control, used e.g. with AMP controllers).
 *
 * Returns completed block credits to the shared block counter (capped
 * at the controller maximum) and reschedules TX work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful in block-based flow control */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the controller-supplied handle count against the
	 * actual payload size before walking the entries.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may belong to a logical AMP channel */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	/* Freed blocks may unblock queued transmissions */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3640
/* Handle the Mode Change event (active <-> sniff/hold).
 *
 * Records the new mode on the connection, mirrors it in the
 * POWER_SAVE flag when no local mode change was pending, and
 * completes any SCO setup deferred until the mode change finished.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		/* If no local mode change is pending, track power-save
		 * state from the reported mode.
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* SCO setup was deferred until this mode change completed */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
3668
/* Handle the PIN Code Request event (legacy pairing).
 *
 * Rejects the request when the device is not bondable and pairing was
 * not locally initiated; otherwise forwards it to user space via mgmt
 * (flagging whether a 16-digit secure PIN is required).
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while pairing is in progress */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		/* Not bondable and remote-initiated: refuse the pairing */
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* High security requires a full 16 digit PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3706
/* Record a link key's type/PIN length on the connection and derive the
 * pending security level the key is able to satisfy.  A changed
 * combination key carries no usable type information and is ignored.
 */
static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
{
	if (key_type == HCI_LK_CHANGED_COMBINATION)
		return;

	conn->pin_length = pin_len;
	conn->key_type = key_type;

	switch (key_type) {
	case HCI_LK_LOCAL_UNIT:
	case HCI_LK_REMOTE_UNIT:
	case HCI_LK_DEBUG_COMBINATION:
		/* These key types imply no security level at all */
		return;
	case HCI_LK_COMBINATION:
		/* Only a full 16 digit PIN yields high security */
		conn->pending_sec_level = (pin_len == 16) ?
					  BT_SECURITY_HIGH : BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		conn->pending_sec_level = BT_SECURITY_HIGH;
		break;
	case HCI_LK_AUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_FIPS;
		break;
	}
}
3738
/* Handle the Link Key Request event.
 *
 * Looks up a stored link key for the peer and replies with it, unless
 * the key's strength is insufficient for the security level being
 * negotiated on the connection, in which case a negative reply forces
 * fresh pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only maintained when mgmt is in use */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* An unauthenticated key cannot satisfy an auth_type
		 * that requires MITM protection.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A short-PIN combination key is too weak for high/FIPS
		 * security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3798
/* Handle the Link Key Notification event (new key from pairing).
 *
 * Stores the key, notifies user space via mgmt, and marks whether the
 * key should be flushed on disconnect depending on its persistence.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;	/* this event carries no PIN length */

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Refresh the disconnect timeout now that pairing completed */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Without mgmt there is no key storage to update */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys must be dropped when the link goes down */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3858
hci_clock_offset_evt(struct hci_dev * hdev,struct sk_buff * skb)3859 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3860 {
3861 struct hci_ev_clock_offset *ev = (void *) skb->data;
3862 struct hci_conn *conn;
3863
3864 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3865
3866 hci_dev_lock(hdev);
3867
3868 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3869 if (conn && !ev->status) {
3870 struct inquiry_entry *ie;
3871
3872 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3873 if (ie) {
3874 ie->data.clock_offset = ev->clock_offset;
3875 ie->timestamp = jiffies;
3876 }
3877 }
3878
3879 hci_dev_unlock(hdev);
3880 }
3881
hci_pkt_type_change_evt(struct hci_dev * hdev,struct sk_buff * skb)3882 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3883 {
3884 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3885 struct hci_conn *conn;
3886
3887 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3888
3889 hci_dev_lock(hdev);
3890
3891 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3892 if (conn && !ev->status)
3893 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3894
3895 hci_dev_unlock(hdev);
3896 }
3897
hci_pscan_rep_mode_evt(struct hci_dev * hdev,struct sk_buff * skb)3898 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3899 {
3900 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3901 struct inquiry_entry *ie;
3902
3903 BT_DBG("%s", hdev->name);
3904
3905 hci_dev_lock(hdev);
3906
3907 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3908 if (ie) {
3909 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3910 ie->timestamp = jiffies;
3911 }
3912
3913 hci_dev_unlock(hdev);
3914 }
3915
hci_inquiry_result_with_rssi_evt(struct hci_dev * hdev,struct sk_buff * skb)3916 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3917 struct sk_buff *skb)
3918 {
3919 struct inquiry_data data;
3920 int num_rsp = *((__u8 *) skb->data);
3921
3922 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3923
3924 if (!num_rsp)
3925 return;
3926
3927 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3928 return;
3929
3930 hci_dev_lock(hdev);
3931
3932 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3933 struct inquiry_info_with_rssi_and_pscan_mode *info;
3934 info = (void *) (skb->data + 1);
3935
3936 for (; num_rsp; num_rsp--, info++) {
3937 u32 flags;
3938
3939 bacpy(&data.bdaddr, &info->bdaddr);
3940 data.pscan_rep_mode = info->pscan_rep_mode;
3941 data.pscan_period_mode = info->pscan_period_mode;
3942 data.pscan_mode = info->pscan_mode;
3943 memcpy(data.dev_class, info->dev_class, 3);
3944 data.clock_offset = info->clock_offset;
3945 data.rssi = info->rssi;
3946 data.ssp_mode = 0x00;
3947
3948 flags = hci_inquiry_cache_update(hdev, &data, false);
3949
3950 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3951 info->dev_class, info->rssi,
3952 flags, NULL, 0, NULL, 0);
3953 }
3954 } else {
3955 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3956
3957 for (; num_rsp; num_rsp--, info++) {
3958 u32 flags;
3959
3960 bacpy(&data.bdaddr, &info->bdaddr);
3961 data.pscan_rep_mode = info->pscan_rep_mode;
3962 data.pscan_period_mode = info->pscan_period_mode;
3963 data.pscan_mode = 0x00;
3964 memcpy(data.dev_class, info->dev_class, 3);
3965 data.clock_offset = info->clock_offset;
3966 data.rssi = info->rssi;
3967 data.ssp_mode = 0x00;
3968
3969 flags = hci_inquiry_cache_update(hdev, &data, false);
3970
3971 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3972 info->dev_class, info->rssi,
3973 flags, NULL, 0, NULL, 0);
3974 }
3975 }
3976
3977 hci_dev_unlock(hdev);
3978 }
3979
/* Handle the Read Remote Extended Features Complete event.
 *
 * Stores the reported feature page; page 0x01 carries the remote
 * host's SSP/SC support bits which gate the connection's pairing
 * behavior.  When the connection is still being configured, continue
 * setup with a remote name request or connection confirmation.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 0x01 contains the remote host's feature bits */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* First successful feature read: resolve the remote name before
	 * announcing the connection; otherwise announce it now.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4043
/* Handle the Synchronous Connection Complete event (SCO/eSCO).
 *
 * On success the connection is promoted to BT_CONNECTED; for a set of
 * negotiation failures an outgoing eSCO attempt is retried with a
 * fallback packet type; any other failure closes the connection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* Retry an outgoing attempt with fallback packet types.
		 * NOTE(review): conn->link is dereferenced unchecked here;
		 * this assumes an outgoing sync conn always has a parent
		 * ACL link — confirm against hci_connect_sco() callers.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
4110
/* Return the length of the significant part of an EIR blob.
 *
 * The blob is a sequence of fields, each one length octet followed by
 * that many data octets; a zero length octet terminates the
 * significant part early.  If no terminator is found, the full
 * @eir_len is returned.
 */
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t offset;

	for (offset = 0; offset < eir_len; offset += eir[offset] + 1) {
		/* A zero-length field marks the end of significant data */
		if (eir[offset] == 0)
			return offset;
	}

	return eir_len;
}
4127
hci_extended_inquiry_result_evt(struct hci_dev * hdev,struct sk_buff * skb)4128 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4129 struct sk_buff *skb)
4130 {
4131 struct inquiry_data data;
4132 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4133 int num_rsp = *((__u8 *) skb->data);
4134 size_t eir_len;
4135
4136 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4137
4138 if (!num_rsp)
4139 return;
4140
4141 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4142 return;
4143
4144 hci_dev_lock(hdev);
4145
4146 for (; num_rsp; num_rsp--, info++) {
4147 u32 flags;
4148 bool name_known;
4149
4150 bacpy(&data.bdaddr, &info->bdaddr);
4151 data.pscan_rep_mode = info->pscan_rep_mode;
4152 data.pscan_period_mode = info->pscan_period_mode;
4153 data.pscan_mode = 0x00;
4154 memcpy(data.dev_class, info->dev_class, 3);
4155 data.clock_offset = info->clock_offset;
4156 data.rssi = info->rssi;
4157 data.ssp_mode = 0x01;
4158
4159 if (hci_dev_test_flag(hdev, HCI_MGMT))
4160 name_known = eir_get_data(info->data,
4161 sizeof(info->data),
4162 EIR_NAME_COMPLETE, NULL);
4163 else
4164 name_known = true;
4165
4166 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4167
4168 eir_len = eir_get_length(info->data, sizeof(info->data));
4169
4170 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4171 info->dev_class, info->rssi,
4172 flags, info->data, eir_len, NULL, 0);
4173 }
4174
4175 hci_dev_unlock(hdev);
4176 }
4177
/* Handle the Encryption Key Refresh Complete event.
 *
 * For LE links this completes the pending security elevation; BR/EDR
 * links are handled via the auth_complete event instead.  A failure on
 * an established link triggers a disconnect.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Failed refresh on a live link: tear the connection down */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Refresh the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4227
hci_get_auth_req(struct hci_conn * conn)4228 static u8 hci_get_auth_req(struct hci_conn *conn)
4229 {
4230 /* If remote requests no-bonding follow that lead */
4231 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4232 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4233 return conn->remote_auth | (conn->auth_type & 0x01);
4234
4235 /* If both remote and local have enough IO capabilities, require
4236 * MITM protection
4237 */
4238 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4239 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4240 return conn->remote_auth | 0x01;
4241
4242 /* No MITM protection possible so ignore remote requirement */
4243 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4244 }
4245
/* Return the OOB data present value (0x00, 0x01 or 0x02) to use in an
 * IO Capability Reply for this connection, based on the stored remote
 * OOB data and the local Secure Connections configuration.
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
		    !memcmp(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check that if
	 * P-192 data values are present.
	 */
	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
	    !memcmp(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}
4287
/* Handle the IO Capability Request event (SSP pairing).
 *
 * Replies with our IO capability, derived authentication requirement
 * and OOB data availability when pairing is allowed; otherwise sends a
 * negative reply rejecting the pairing.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4356
hci_io_capa_reply_evt(struct hci_dev * hdev,struct sk_buff * skb)4357 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4358 {
4359 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4360 struct hci_conn *conn;
4361
4362 BT_DBG("%s", hdev->name);
4363
4364 hci_dev_lock(hdev);
4365
4366 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4367 if (!conn)
4368 goto unlock;
4369
4370 conn->remote_cap = ev->capability;
4371 conn->remote_auth = ev->authentication;
4372
4373 unlock:
4374 hci_dev_unlock(hdev);
4375 }
4376
/* Handle the User Confirmation Request event (SSP numeric comparison).
 *
 * Rejects the request when required MITM protection cannot be
 * provided, auto-accepts (optionally after a configurable delay) when
 * neither side needs MITM, and otherwise forwards the request to user
 * space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* MITM requirement of each side (bit 0 of the auth requirement) */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optional user-configurable delay before auto-accepting */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4451
hci_user_passkey_request_evt(struct hci_dev * hdev,struct sk_buff * skb)4452 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4453 struct sk_buff *skb)
4454 {
4455 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4456
4457 BT_DBG("%s", hdev->name);
4458
4459 if (hci_dev_test_flag(hdev, HCI_MGMT))
4460 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4461 }
4462
hci_user_passkey_notify_evt(struct hci_dev * hdev,struct sk_buff * skb)4463 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4464 struct sk_buff *skb)
4465 {
4466 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4467 struct hci_conn *conn;
4468
4469 BT_DBG("%s", hdev->name);
4470
4471 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4472 if (!conn)
4473 return;
4474
4475 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4476 conn->passkey_entered = 0;
4477
4478 if (hci_dev_test_flag(hdev, HCI_MGMT))
4479 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4480 conn->dst_type, conn->passkey_notify,
4481 conn->passkey_entered);
4482 }
4483
hci_keypress_notify_evt(struct hci_dev * hdev,struct sk_buff * skb)4484 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4485 {
4486 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4487 struct hci_conn *conn;
4488
4489 BT_DBG("%s", hdev->name);
4490
4491 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4492 if (!conn)
4493 return;
4494
4495 switch (ev->type) {
4496 case HCI_KEYPRESS_STARTED:
4497 conn->passkey_entered = 0;
4498 return;
4499
4500 case HCI_KEYPRESS_ENTERED:
4501 conn->passkey_entered++;
4502 break;
4503
4504 case HCI_KEYPRESS_ERASED:
4505 conn->passkey_entered--;
4506 break;
4507
4508 case HCI_KEYPRESS_CLEARED:
4509 conn->passkey_entered = 0;
4510 break;
4511
4512 case HCI_KEYPRESS_COMPLETED:
4513 return;
4514 }
4515
4516 if (hci_dev_test_flag(hdev, HCI_MGMT))
4517 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4518 conn->dst_type, conn->passkey_notify,
4519 conn->passkey_entered);
4520 }
4521
hci_simple_pair_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4522 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4523 struct sk_buff *skb)
4524 {
4525 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4526 struct hci_conn *conn;
4527
4528 BT_DBG("%s", hdev->name);
4529
4530 hci_dev_lock(hdev);
4531
4532 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4533 if (!conn)
4534 goto unlock;
4535
4536 /* Reset the authentication requirement to unknown */
4537 conn->remote_auth = 0xff;
4538
4539 /* To avoid duplicate auth_failed events to user space we check
4540 * the HCI_CONN_AUTH_PEND flag which will be set if we
4541 * initiated the authentication. A traditional auth_complete
4542 * event gets always produced as initiator and is also mapped to
4543 * the mgmt_auth_failed event */
4544 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4545 mgmt_auth_failed(conn, ev->status);
4546
4547 hci_conn_drop(conn);
4548
4549 unlock:
4550 hci_dev_unlock(hdev);
4551 }
4552
hci_remote_host_features_evt(struct hci_dev * hdev,struct sk_buff * skb)4553 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4554 struct sk_buff *skb)
4555 {
4556 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4557 struct inquiry_entry *ie;
4558 struct hci_conn *conn;
4559
4560 BT_DBG("%s", hdev->name);
4561
4562 hci_dev_lock(hdev);
4563
4564 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4565 if (conn)
4566 memcpy(conn->features[1], ev->features, 8);
4567
4568 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4569 if (ie)
4570 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4571
4572 hci_dev_unlock(hdev);
4573 }
4574
/* Remote OOB Data Request event: the controller asks for the
 * out-of-band pairing data previously stored for this address. Reply
 * with the stored hash/randomizer values, or send a negative reply
 * when no OOB data is known.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* OOB data is only provisioned through the mgmt interface */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		/* Secure Connections: use the extended reply carrying
		 * both P-192 and P-256 values. In SC-only mode the
		 * P-192 values are zeroed out so only P-256 is usable.
		 */
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Legacy pairing: only the P-192 values are sent */
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4628
4629 #if IS_ENABLED(CONFIG_BT_HS)
hci_chan_selected_evt(struct hci_dev * hdev,struct sk_buff * skb)4630 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4631 {
4632 struct hci_ev_channel_selected *ev = (void *)skb->data;
4633 struct hci_conn *hcon;
4634
4635 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4636
4637 skb_pull(skb, sizeof(*ev));
4638
4639 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4640 if (!hcon)
4641 return;
4642
4643 amp_read_loc_assoc_final_data(hdev, hcon);
4644 }
4645
hci_phy_link_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4646 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4647 struct sk_buff *skb)
4648 {
4649 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4650 struct hci_conn *hcon, *bredr_hcon;
4651
4652 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4653 ev->status);
4654
4655 hci_dev_lock(hdev);
4656
4657 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4658 if (!hcon) {
4659 hci_dev_unlock(hdev);
4660 return;
4661 }
4662
4663 if (ev->status) {
4664 hci_conn_del(hcon);
4665 hci_dev_unlock(hdev);
4666 return;
4667 }
4668
4669 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4670
4671 hcon->state = BT_CONNECTED;
4672 bacpy(&hcon->dst, &bredr_hcon->dst);
4673
4674 hci_conn_hold(hcon);
4675 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4676 hci_conn_drop(hcon);
4677
4678 hci_debugfs_create_conn(hcon);
4679 hci_conn_add_sysfs(hcon);
4680
4681 amp_physical_cfm(bredr_hcon, hcon);
4682
4683 hci_dev_unlock(hdev);
4684 }
4685
/* Logical Link Complete event (AMP): create the hci_chan for the new
 * logical link and, if an L2CAP channel is waiting on the AMP manager,
 * confirm the logical link to L2CAP.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP controllers report a block MTU; use it for the
		 * L2CAP connection on this link.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		/* Take an extra reference on the physical link */
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4723
hci_disconn_loglink_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4724 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4725 struct sk_buff *skb)
4726 {
4727 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4728 struct hci_chan *hchan;
4729
4730 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4731 le16_to_cpu(ev->handle), ev->status);
4732
4733 if (ev->status)
4734 return;
4735
4736 hci_dev_lock(hdev);
4737
4738 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4739 if (!hchan)
4740 goto unlock;
4741
4742 amp_destroy_logical_link(hchan, ev->reason);
4743
4744 unlock:
4745 hci_dev_unlock(hdev);
4746 }
4747
hci_disconn_phylink_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4748 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4749 struct sk_buff *skb)
4750 {
4751 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4752 struct hci_conn *hcon;
4753
4754 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4755
4756 if (ev->status)
4757 return;
4758
4759 hci_dev_lock(hdev);
4760
4761 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4762 if (hcon) {
4763 hcon->state = BT_CLOSED;
4764 hci_conn_del(hcon);
4765 }
4766
4767 hci_dev_unlock(hdev);
4768 }
4769 #endif
4770
/* Common handler for the legacy and enhanced LE Connection Complete
 * events. Creates or completes the hci_conn object, fixes up the
 * initiator/responder addresses, resolves the peer's identity address,
 * notifies mgmt and kicks off the remote feature exchange where
 * applicable.
 *
 * All multi-byte values have already been converted to host byte order
 * by the callers.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
				 u16 interval, u16 latency, u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* An outgoing connection attempt existed; stop its
		 * timeout now that the controller has answered.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(hdev))
				bacpy(&conn->resp_addr, &hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (status) {
		hci_le_conn_failed(conn, status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	if (!status) {
		/* The remote features procedure is defined for master
		 * role only. So only in case of an initiated connection
		 * request the remote features.
		 *
		 * If the local controller supports slave-initiated features
		 * exchange, then requesting the remote features in slave
		 * role is possible. Otherwise just transition into the
		 * connected state without requesting the remote features.
		 */
		if (conn->out ||
		    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
			struct hci_cp_le_read_remote_features cp;

			cp.handle = __cpu_to_le16(conn->handle);

			hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			/* Keep a reference until the remote features
			 * exchange completes.
			 */
			hci_conn_hold(conn);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
		}
	} else {
		hci_connect_cfm(conn, status);
	}

	/* A pending connect entry has now served its purpose; remove it
	 * from the action list and drop the reference it held.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4936
hci_le_conn_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4937 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4938 {
4939 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4940
4941 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4942
4943 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
4944 ev->role, le16_to_cpu(ev->handle),
4945 le16_to_cpu(ev->interval),
4946 le16_to_cpu(ev->latency),
4947 le16_to_cpu(ev->supervision_timeout));
4948 }
4949
hci_le_enh_conn_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4950 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
4951 struct sk_buff *skb)
4952 {
4953 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
4954
4955 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4956
4957 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
4958 ev->role, le16_to_cpu(ev->handle),
4959 le16_to_cpu(ev->interval),
4960 le16_to_cpu(ev->latency),
4961 le16_to_cpu(ev->supervision_timeout));
4962 }
4963
hci_le_ext_adv_term_evt(struct hci_dev * hdev,struct sk_buff * skb)4964 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
4965 {
4966 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
4967 struct hci_conn *conn;
4968
4969 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4970
4971 if (ev->status)
4972 return;
4973
4974 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
4975 if (conn) {
4976 struct adv_info *adv_instance;
4977
4978 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
4979 return;
4980
4981 if (!hdev->cur_adv_instance) {
4982 bacpy(&conn->resp_addr, &hdev->random_addr);
4983 return;
4984 }
4985
4986 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
4987 if (adv_instance)
4988 bacpy(&conn->resp_addr, &adv_instance->random_addr);
4989 }
4990 }
4991
hci_le_conn_update_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4992 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4993 struct sk_buff *skb)
4994 {
4995 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4996 struct hci_conn *conn;
4997
4998 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4999
5000 if (ev->status)
5001 return;
5002
5003 hci_dev_lock(hdev);
5004
5005 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5006 if (conn) {
5007 conn->le_conn_interval = le16_to_cpu(ev->interval);
5008 conn->le_conn_latency = le16_to_cpu(ev->latency);
5009 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5010 }
5011
5012 hci_dev_unlock(hdev);
5013 }
5014
/* This function requires the caller holds hdev->lock */
/* Check whether an advertising report should trigger a connection
 * attempt to the advertiser, based on the pend_le_conns list and the
 * per-device auto-connect policy.
 *
 * Returns the new hci_conn on success, or NULL when no connection was
 * (or could be) initiated.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connectioms from slave device are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
			      direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
5103
/* Process a single advertising report in legacy format: possibly
 * trigger a pending connection to the advertiser, and generate or
 * merge mgmt device_found events depending on the scan type and the
 * report type.
 *
 * direct_addr/direct_addr_type are only set for LE Direct Advertising
 * Report events; for normal advertising reports direct_addr is NULL.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;
	u8 *ptr, real_len;

	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* real_len is the length of well-formed AD structures only */
	real_len = ptr - data;

	/* Adjust for actual length */
	if (len != real_len) {
		bt_dev_err_ratelimited(hdev, "advertising data len corrected");
		len = real_len;
	}

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
				     direct_addr);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
5300
hci_le_adv_report_evt(struct hci_dev * hdev,struct sk_buff * skb)5301 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5302 {
5303 u8 num_reports = skb->data[0];
5304 void *ptr = &skb->data[1];
5305
5306 hci_dev_lock(hdev);
5307
5308 while (num_reports--) {
5309 struct hci_ev_le_advertising_info *ev = ptr;
5310 s8 rssi;
5311
5312 if (ev->length <= HCI_MAX_AD_LENGTH) {
5313 rssi = ev->data[ev->length];
5314 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5315 ev->bdaddr_type, NULL, 0, rssi,
5316 ev->data, ev->length);
5317 } else {
5318 bt_dev_err(hdev, "Dropping invalid advertising data");
5319 }
5320
5321 ptr += sizeof(*ev) + ev->length + 1;
5322 }
5323
5324 hci_dev_unlock(hdev);
5325 }
5326
ext_evt_type_to_legacy(u16 evt_type)5327 static u8 ext_evt_type_to_legacy(u16 evt_type)
5328 {
5329 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5330 switch (evt_type) {
5331 case LE_LEGACY_ADV_IND:
5332 return LE_ADV_IND;
5333 case LE_LEGACY_ADV_DIRECT_IND:
5334 return LE_ADV_DIRECT_IND;
5335 case LE_LEGACY_ADV_SCAN_IND:
5336 return LE_ADV_SCAN_IND;
5337 case LE_LEGACY_NONCONN_IND:
5338 return LE_ADV_NONCONN_IND;
5339 case LE_LEGACY_SCAN_RSP_ADV:
5340 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5341 return LE_ADV_SCAN_RSP;
5342 }
5343
5344 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5345 evt_type);
5346
5347 return LE_ADV_INVALID;
5348 }
5349
5350 if (evt_type & LE_EXT_ADV_CONN_IND) {
5351 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5352 return LE_ADV_DIRECT_IND;
5353
5354 return LE_ADV_IND;
5355 }
5356
5357 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5358 return LE_ADV_SCAN_RSP;
5359
5360 if (evt_type & LE_EXT_ADV_SCAN_IND)
5361 return LE_ADV_SCAN_IND;
5362
5363 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5364 evt_type & LE_EXT_ADV_DIRECT_IND)
5365 return LE_ADV_NONCONN_IND;
5366
5367 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5368 evt_type);
5369
5370 return LE_ADV_INVALID;
5371 }
5372
hci_le_ext_adv_report_evt(struct hci_dev * hdev,struct sk_buff * skb)5373 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5374 {
5375 u8 num_reports = skb->data[0];
5376 void *ptr = &skb->data[1];
5377
5378 hci_dev_lock(hdev);
5379
5380 while (num_reports--) {
5381 struct hci_ev_le_ext_adv_report *ev = ptr;
5382 u8 legacy_evt_type;
5383 u16 evt_type;
5384
5385 evt_type = __le16_to_cpu(ev->evt_type);
5386 legacy_evt_type = ext_evt_type_to_legacy(evt_type);
5387 if (legacy_evt_type != LE_ADV_INVALID) {
5388 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5389 ev->bdaddr_type, NULL, 0, ev->rssi,
5390 ev->data, ev->length);
5391 }
5392
5393 ptr += sizeof(*ev) + ev->length + 1;
5394 }
5395
5396 hci_dev_unlock(hdev);
5397 }
5398
/* LE Read Remote Features Complete event: store the remote LE feature
 * page and finish the connection setup that was deferred until the
 * feature exchange completed.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports slave-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
			    !conn->out && ev->status == 0x1a)
				status = 0x00;
			else
				status = ev->status;

			/* Complete the setup and drop the reference taken
			 * when the feature read was issued.
			 */
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
5440
/* Handle the LE Long Term Key Request meta event: look up a stored LTK
 * matching the connection and the requested EDiv/Rand, and answer with
 * either an LTK Reply (key found) or an LTK Negative Reply (no match).
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the stored key and zero-pad the reply out to the full
	 * 16-byte LTK field when the negotiated key size is shorter.
	 */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* One-shot key: remove it from the key list via RCU */
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* ev->handle is already little-endian; pass it through unchanged */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
5504
/* Reject a remote LE connection parameter request for @handle with the
 * given HCI @reason code.
 */
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp = {
		.handle = cpu_to_le16(handle),
		.reason = reason,
	};

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}
5516
hci_le_remote_conn_param_req_evt(struct hci_dev * hdev,struct sk_buff * skb)5517 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5518 struct sk_buff *skb)
5519 {
5520 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5521 struct hci_cp_le_conn_param_req_reply cp;
5522 struct hci_conn *hcon;
5523 u16 handle, min, max, latency, timeout;
5524
5525 handle = le16_to_cpu(ev->handle);
5526 min = le16_to_cpu(ev->interval_min);
5527 max = le16_to_cpu(ev->interval_max);
5528 latency = le16_to_cpu(ev->latency);
5529 timeout = le16_to_cpu(ev->timeout);
5530
5531 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5532 if (!hcon || hcon->state != BT_CONNECTED)
5533 return send_conn_param_neg_reply(hdev, handle,
5534 HCI_ERROR_UNKNOWN_CONN_ID);
5535
5536 if (hci_check_conn_params(min, max, latency, timeout))
5537 return send_conn_param_neg_reply(hdev, handle,
5538 HCI_ERROR_INVALID_LL_PARAMS);
5539
5540 if (hcon->role == HCI_ROLE_MASTER) {
5541 struct hci_conn_params *params;
5542 u8 store_hint;
5543
5544 hci_dev_lock(hdev);
5545
5546 params = hci_conn_params_lookup(hdev, &hcon->dst,
5547 hcon->dst_type);
5548 if (params) {
5549 params->conn_min_interval = min;
5550 params->conn_max_interval = max;
5551 params->conn_latency = latency;
5552 params->supervision_timeout = timeout;
5553 store_hint = 0x01;
5554 } else{
5555 store_hint = 0x00;
5556 }
5557
5558 hci_dev_unlock(hdev);
5559
5560 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5561 store_hint, min, max, latency, timeout);
5562 }
5563
5564 cp.handle = ev->handle;
5565 cp.interval_min = ev->interval_min;
5566 cp.interval_max = ev->interval_max;
5567 cp.latency = ev->latency;
5568 cp.timeout = ev->timeout;
5569 cp.min_ce_len = 0;
5570 cp.max_ce_len = 0;
5571
5572 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5573 }
5574
hci_le_direct_adv_report_evt(struct hci_dev * hdev,struct sk_buff * skb)5575 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5576 struct sk_buff *skb)
5577 {
5578 u8 num_reports = skb->data[0];
5579 void *ptr = &skb->data[1];
5580
5581 hci_dev_lock(hdev);
5582
5583 while (num_reports--) {
5584 struct hci_ev_le_direct_adv_info *ev = ptr;
5585
5586 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5587 ev->bdaddr_type, &ev->direct_addr,
5588 ev->direct_addr_type, ev->rssi, NULL, 0);
5589
5590 ptr += sizeof(*ev);
5591 }
5592
5593 hci_dev_unlock(hdev);
5594 }
5595
hci_le_meta_evt(struct hci_dev * hdev,struct sk_buff * skb)5596 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5597 {
5598 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5599
5600 skb_pull(skb, sizeof(*le_ev));
5601
5602 switch (le_ev->subevent) {
5603 case HCI_EV_LE_CONN_COMPLETE:
5604 hci_le_conn_complete_evt(hdev, skb);
5605 break;
5606
5607 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5608 hci_le_conn_update_complete_evt(hdev, skb);
5609 break;
5610
5611 case HCI_EV_LE_ADVERTISING_REPORT:
5612 hci_le_adv_report_evt(hdev, skb);
5613 break;
5614
5615 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5616 hci_le_remote_feat_complete_evt(hdev, skb);
5617 break;
5618
5619 case HCI_EV_LE_LTK_REQ:
5620 hci_le_ltk_request_evt(hdev, skb);
5621 break;
5622
5623 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5624 hci_le_remote_conn_param_req_evt(hdev, skb);
5625 break;
5626
5627 case HCI_EV_LE_DIRECT_ADV_REPORT:
5628 hci_le_direct_adv_report_evt(hdev, skb);
5629 break;
5630
5631 case HCI_EV_LE_EXT_ADV_REPORT:
5632 hci_le_ext_adv_report_evt(hdev, skb);
5633 break;
5634
5635 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
5636 hci_le_enh_conn_complete_evt(hdev, skb);
5637 break;
5638
5639 case HCI_EV_LE_EXT_ADV_SET_TERM:
5640 hci_le_ext_adv_term_evt(hdev, skb);
5641 break;
5642
5643 default:
5644 break;
5645 }
5646 }
5647
hci_get_cmd_complete(struct hci_dev * hdev,u16 opcode,u8 event,struct sk_buff * skb)5648 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5649 u8 event, struct sk_buff *skb)
5650 {
5651 struct hci_ev_cmd_complete *ev;
5652 struct hci_event_hdr *hdr;
5653
5654 if (!skb)
5655 return false;
5656
5657 if (skb->len < sizeof(*hdr)) {
5658 bt_dev_err(hdev, "too short HCI event");
5659 return false;
5660 }
5661
5662 hdr = (void *) skb->data;
5663 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5664
5665 if (event) {
5666 if (hdr->evt != event)
5667 return false;
5668 return true;
5669 }
5670
5671 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5672 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
5673 hdr->evt);
5674 return false;
5675 }
5676
5677 if (skb->len < sizeof(*ev)) {
5678 bt_dev_err(hdev, "too short cmd_complete event");
5679 return false;
5680 }
5681
5682 ev = (void *) skb->data;
5683 skb_pull(skb, sizeof(*ev));
5684
5685 if (opcode != __le16_to_cpu(ev->opcode)) {
5686 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5687 __le16_to_cpu(ev->opcode));
5688 return false;
5689 }
5690
5691 return true;
5692 }
5693
/* Top-level entry point for received HCI event packets: dispatch the
 * event to its handler, then resolve any pending command-request
 * completion (either the plain callback or the skb-carrying variant).
 * Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* If the outstanding command was registered to complete on this
	 * specific event (rather than Command Complete/Status), fetch its
	 * completion callbacks now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	/* Command Complete/Status handlers may update opcode, status and
	 * the completion callbacks for the pending request.
	 */
	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	/* AMP / high-speed events are only handled when BT_HS is built */
#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Only hand the pristine copy to the callback when it
		 * really is the completion event for the pending opcode;
		 * otherwise pass NULL and free the copy here.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

	/* kfree_skb(NULL) is a no-op, so this is safe on every path */
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
5918