/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
#include "msft.h"

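/* A 16-octet link key consisting of all zeros, used to detect and reject
 * zeroed-out link keys.
 */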
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

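/* Convert a duration in seconds to jiffies by way of msecs_to_jiffies(). */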
#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)

/* Handle HCI Event packets */

static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive an Inquiry Complete event right
	 * before we receive the Inquiry Cancel Command Complete event, in
	 * which case the latter event should have a status of Command
	 * Disallowed (0x0c). This should not be treated as an error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	*new_status = status;

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}

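/* The Remote Name Request Cancel command needs no bookkeeping here; the
 * outcome is reported through the Remote Name Request Complete event.
 */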
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

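	/* In the command parameters the 2-byte policy value follows the
	 * 2-byte connection handle, hence the "sent + 2" offset below.
	 */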
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}

static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

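	/* Cache the key counts only when the command was asked to read all
	 * stored keys (read_all == 0x01).
	 */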
	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}

static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

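	/* The scan enable parameter is a bitmask: SCAN_INQUIRY turns on
	 * inquiry scan (discoverable), SCAN_PAGE turns on page scan
	 * (connectable).
	 */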
	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}

static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_set_event_filter *cp;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return;

	hci_dev_lock(hdev);

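	/* As with Write Link Policy above, the timeout value follows the
	 * 2-byte connection handle in the command parameters.
	 */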
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by the device.
	 */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}

static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

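	/* HCI_QUIRK_FIXUP_BUFFER_SIZE marks controllers that report invalid
	 * SCO buffer values; substitute sane defaults for them.
	 */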
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}

static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

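	/* Which_Clock 0x00 requests the local Bluetooth clock; any other
	 * value requests the piconet clock of the given connection.
	 */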
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}

static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
					       struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->err_data_reporting = rp->err_data_reporting;
}

static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_write_def_err_data_reporting *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return;

	hdev->err_data_reporting = cp->err_data_reporting;
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}

static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

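	/* If the address just programmed is the current RPA, clear the
	 * expired flag and restart the expiry timer.
	 */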
	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only if the handle refers to an adv instance, since
	 * handle 0x00 shall use HCI_OP_LE_SET_RANDOM_ADDR instead, which
	 * covers both extended and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;
}

static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	__u8 status = *((__u8 *) skb->data);
	struct adv_info *adv = NULL, *n;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (adv) {
			adv->enabled = false;
			/* If just one instance was disabled, check if any
			 * other instance is still enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}

static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

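	/* bacmp() is non-zero when last_adv_addr differs from BDADDR_ANY,
	 * i.e. a deferred advertising report is waiting to be sent.
	 */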
	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request; therefore
		 * mark discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}

static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_accept_list_size = rp->size;
}

static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
}

static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}

static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}

static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}

static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
}

static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
}

static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}

static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						 struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}

static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}

static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}

static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

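	/* Type 0x00 reads the current transmit power level, 0x01 the
	 * maximum transmit power level.
	 */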
	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}

static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
1935 if (status != 0x0c || conn->attempt > 2) {
1936 conn->state = BT_CLOSED;
1937 hci_connect_cfm(conn, status);
1938 hci_conn_del(conn);
1939 } else
1940 conn->state = BT_CONNECT2;
1941 }
1942 } else {
1943 if (!conn) {
1944 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1945 HCI_ROLE_MASTER);
1946 if (!conn)
1947 bt_dev_err(hdev, "no memory for new connection");
1948 }
1949 }
1950
1951 hci_dev_unlock(hdev);
1952 }
1953
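/* Command Status handler for HCI_Add_SCO_Connection. Only failures matter
 * here: look up the SCO link hanging off the referenced ACL connection,
 * mark it closed and tear it down.
 */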
1954 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1955 {
1956 struct hci_cp_add_sco *cp;
1957 struct hci_conn *acl, *sco;
1958 __u16 handle;
1959
1960 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1961
1962 if (!status)
1963 return;
1964
1965 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1966 if (!cp)
1967 return;
1968
1969 handle = __le16_to_cpu(cp->handle);
1970
1971 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1972
1973 hci_dev_lock(hdev);
1974
1975 acl = hci_conn_hash_lookup_handle(hdev, handle);
1976 if (acl) {
1977 sco = acl->link;
1978 if (sco) {
1979 sco->state = BT_CLOSED;
1980
1981 hci_connect_cfm(sco, status);
1982 hci_conn_del(sco);
1983 }
1984 }
1985
1986 hci_dev_unlock(hdev);
1987 }
1988
1989 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1990 {
1991 struct hci_cp_auth_requested *cp;
1992 struct hci_conn *conn;
1993
1994 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1995
1996 if (!status)
1997 return;
1998
1999 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2000 if (!cp)
2001 return;
2002
2003 hci_dev_lock(hdev);
2004
2005 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2006 if (conn) {
2007 if (conn->state == BT_CONFIG) {
2008 hci_connect_cfm(conn, status);
2009 hci_conn_drop(conn);
2010 }
2011 }
2012
2013 hci_dev_unlock(hdev);
2014 }
2015
2016 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2017 {
2018 struct hci_cp_set_conn_encrypt *cp;
2019 struct hci_conn *conn;
2020
2021 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2022
2023 if (!status)
2024 return;
2025
2026 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2027 if (!cp)
2028 return;
2029
2030 hci_dev_lock(hdev);
2031
2032 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2033 if (conn) {
2034 if (conn->state == BT_CONFIG) {
2035 hci_connect_cfm(conn, status);
2036 hci_conn_drop(conn);
2037 }
2038 }
2039
2040 hci_dev_unlock(hdev);
2041 }
2042
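/* Decide whether an outgoing connection that is still in BT_CONFIG needs
 * authentication before it can be reported as connected.
 */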
2043 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2044 struct hci_conn *conn)
2045 {
2046 if (conn->state != BT_CONFIG || !conn->out)
2047 return 0;
2048
2049 if (conn->pending_sec_level == BT_SECURITY_SDP)
2050 return 0;
2051
2052 	/* Only request authentication for SSP connections, for non-SSP
2053 	 * devices with sec_level MEDIUM, HIGH or FIPS, or if MITM
2054 	 * protection is requested.
2055 	 */
2056 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2057 conn->pending_sec_level != BT_SECURITY_FIPS &&
2058 conn->pending_sec_level != BT_SECURITY_HIGH &&
2059 conn->pending_sec_level != BT_SECURITY_MEDIUM)
2060 return 0;
2061
2062 return 1;
2063 }
2064
2065 static int hci_resolve_name(struct hci_dev *hdev,
2066 struct inquiry_entry *e)
2067 {
2068 struct hci_cp_remote_name_req cp;
2069
2070 memset(&cp, 0, sizeof(cp));
2071
2072 bacpy(&cp.bdaddr, &e->data.bdaddr);
2073 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2074 cp.pscan_mode = e->data.pscan_mode;
2075 cp.clock_offset = e->data.clock_offset;
2076
2077 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2078 }
2079
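/* Pick the next inquiry cache entry that still needs its name resolved and
 * issue a Remote Name Request for it. Returns true if a request was sent
 * and name resolution is therefore still in progress.
 */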
2080 static bool hci_resolve_next_name(struct hci_dev *hdev)
2081 {
2082 struct discovery_state *discov = &hdev->discovery;
2083 struct inquiry_entry *e;
2084
2085 if (list_empty(&discov->resolve))
2086 return false;
2087
2088 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2089 if (!e)
2090 return false;
2091
2092 if (hci_resolve_name(hdev, e) == 0) {
2093 e->name_state = NAME_PENDING;
2094 return true;
2095 }
2096
2097 return false;
2098 }
2099
2100 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2101 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2102 {
2103 struct discovery_state *discov = &hdev->discovery;
2104 struct inquiry_entry *e;
2105
2106 	/* Update the mgmt connected state if necessary. Be careful,
2107 	 * however, with conn objects that exist but are not (yet)
2108 	 * connected. Only those in the BT_CONFIG or BT_CONNECTED states
2109 	 * can be considered connected.
2110 	 */
2111 if (conn &&
2112 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2113 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2114 mgmt_device_connected(hdev, conn, name, name_len);
2115
2116 if (discov->state == DISCOVERY_STOPPED)
2117 return;
2118
2119 if (discov->state == DISCOVERY_STOPPING)
2120 goto discov_complete;
2121
2122 if (discov->state != DISCOVERY_RESOLVING)
2123 return;
2124
2125 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2126 	/* If the device was not found in the list of devices whose names
2127 	 * are pending, there is no need to continue resolving the next
2128 	 * name, as that will be done upon receiving another Remote Name
2129 	 * Request Complete event. */
2130 if (!e)
2131 return;
2132
2133 list_del(&e->list);
2134 if (name) {
2135 e->name_state = NAME_KNOWN;
2136 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2137 e->data.rssi, name, name_len);
2138 } else {
2139 e->name_state = NAME_NOT_KNOWN;
2140 }
2141
2142 if (hci_resolve_next_name(hdev))
2143 return;
2144
2145 discov_complete:
2146 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2147 }
2148
2149 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2150 {
2151 struct hci_cp_remote_name_req *cp;
2152 struct hci_conn *conn;
2153
2154 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2155
2156 	/* If successful, wait for the Remote Name Request Complete event
2157 	 * before checking whether authentication is needed. */
2158 if (!status)
2159 return;
2160
2161 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2162 if (!cp)
2163 return;
2164
2165 hci_dev_lock(hdev);
2166
2167 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2168
2169 if (hci_dev_test_flag(hdev, HCI_MGMT))
2170 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2171
2172 if (!conn)
2173 goto unlock;
2174
2175 if (!hci_outgoing_auth_needed(hdev, conn))
2176 goto unlock;
2177
2178 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2179 struct hci_cp_auth_requested auth_cp;
2180
2181 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2182
2183 auth_cp.handle = __cpu_to_le16(conn->handle);
2184 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2185 sizeof(auth_cp), &auth_cp);
2186 }
2187
2188 unlock:
2189 hci_dev_unlock(hdev);
2190 }
2191
2192 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2193 {
2194 struct hci_cp_read_remote_features *cp;
2195 struct hci_conn *conn;
2196
2197 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2198
2199 if (!status)
2200 return;
2201
2202 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2203 if (!cp)
2204 return;
2205
2206 hci_dev_lock(hdev);
2207
2208 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2209 if (conn) {
2210 if (conn->state == BT_CONFIG) {
2211 hci_connect_cfm(conn, status);
2212 hci_conn_drop(conn);
2213 }
2214 }
2215
2216 hci_dev_unlock(hdev);
2217 }
2218
2219 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2220 {
2221 struct hci_cp_read_remote_ext_features *cp;
2222 struct hci_conn *conn;
2223
2224 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2225
2226 if (!status)
2227 return;
2228
2229 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2230 if (!cp)
2231 return;
2232
2233 hci_dev_lock(hdev);
2234
2235 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2236 if (conn) {
2237 if (conn->state == BT_CONFIG) {
2238 hci_connect_cfm(conn, status);
2239 hci_conn_drop(conn);
2240 }
2241 }
2242
2243 hci_dev_unlock(hdev);
2244 }
2245
2246 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2247 {
2248 struct hci_cp_setup_sync_conn *cp;
2249 struct hci_conn *acl, *sco;
2250 __u16 handle;
2251
2252 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2253
2254 if (!status)
2255 return;
2256
2257 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2258 if (!cp)
2259 return;
2260
2261 handle = __le16_to_cpu(cp->handle);
2262
2263 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2264
2265 hci_dev_lock(hdev);
2266
2267 acl = hci_conn_hash_lookup_handle(hdev, handle);
2268 if (acl) {
2269 sco = acl->link;
2270 if (sco) {
2271 sco->state = BT_CLOSED;
2272
2273 hci_connect_cfm(sco, status);
2274 hci_conn_del(sco);
2275 }
2276 }
2277
2278 hci_dev_unlock(hdev);
2279 }
2280
2281 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2282 {
2283 struct hci_cp_sniff_mode *cp;
2284 struct hci_conn *conn;
2285
2286 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2287
2288 if (!status)
2289 return;
2290
2291 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2292 if (!cp)
2293 return;
2294
2295 hci_dev_lock(hdev);
2296
2297 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2298 if (conn) {
2299 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2300
2301 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2302 hci_sco_setup(conn, status);
2303 }
2304
2305 hci_dev_unlock(hdev);
2306 }
2307
2308 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2309 {
2310 struct hci_cp_exit_sniff_mode *cp;
2311 struct hci_conn *conn;
2312
2313 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2314
2315 if (!status)
2316 return;
2317
2318 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2319 if (!cp)
2320 return;
2321
2322 hci_dev_lock(hdev);
2323
2324 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2325 if (conn) {
2326 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2327
2328 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2329 hci_sco_setup(conn, status);
2330 }
2331
2332 hci_dev_unlock(hdev);
2333 }
2334
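/* Command Status handler for HCI_Disconnect. Only failures are of interest
 * here: since the upper layers do not retry a failed disconnect, report
 * the failure, re-enable advertising for LE links if necessary and delete
 * the connection object.
 */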
2335 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2336 {
2337 struct hci_cp_disconnect *cp;
2338 struct hci_conn *conn;
2339
2340 if (!status)
2341 return;
2342
2343 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2344 if (!cp)
2345 return;
2346
2347 hci_dev_lock(hdev);
2348
2349 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2350 if (conn) {
2351 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2352 conn->dst_type, status);
2353
2354 if (conn->type == LE_LINK) {
2355 hdev->cur_adv_instance = conn->adv_instance;
2356 hci_req_reenable_advertising(hdev);
2357 }
2358
2359 /* If the disconnection failed for any reason, the upper layer
2360 * does not retry to disconnect in current implementation.
2361 * Hence, we need to do some basic cleanup here and re-enable
2362 * advertising if necessary.
2363 */
2364 hci_conn_del(conn);
2365 }
2366
2367 hci_dev_unlock(hdev);
2368 }
2369
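/* Common Command Status handling for the legacy and extended LE create
 * connection commands: normalize controller-resolved address types, store
 * the initiator/responder addresses needed by SMP and arm the connection
 * timeout for attempts that do not go through the accept list.
 */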
2370 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2371 u8 peer_addr_type, u8 own_address_type,
2372 u8 filter_policy)
2373 {
2374 struct hci_conn *conn;
2375
2376 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2377 peer_addr_type);
2378 if (!conn)
2379 return;
2380
2381 	/* When controller-based address resolution is in use, the new
2382 	 * address types 0x02 and 0x03 are used. These need to be converted
2383 	 * back into either public or random address type.
2384 	 */
2385 if (use_ll_privacy(hdev) &&
2386 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2387 switch (own_address_type) {
2388 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2389 own_address_type = ADDR_LE_DEV_PUBLIC;
2390 break;
2391 case ADDR_LE_DEV_RANDOM_RESOLVED:
2392 own_address_type = ADDR_LE_DEV_RANDOM;
2393 break;
2394 }
2395 }
2396
2397 /* Store the initiator and responder address information which
2398 * is needed for SMP. These values will not change during the
2399 * lifetime of the connection.
2400 */
2401 conn->init_addr_type = own_address_type;
2402 if (own_address_type == ADDR_LE_DEV_RANDOM)
2403 bacpy(&conn->init_addr, &hdev->random_addr);
2404 else
2405 bacpy(&conn->init_addr, &hdev->bdaddr);
2406
2407 conn->resp_addr_type = peer_addr_type;
2408 bacpy(&conn->resp_addr, peer_addr);
2409
2410 /* We don't want the connection attempt to stick around
2411 * indefinitely since LE doesn't have a page timeout concept
2412 * like BR/EDR. Set a timer for any connection that doesn't use
2413 * the accept list for connecting.
2414 */
2415 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2416 queue_delayed_work(conn->hdev->workqueue,
2417 &conn->le_conn_timeout,
2418 conn->conn_timeout);
2419 }
2420
2421 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2422 {
2423 struct hci_cp_le_create_conn *cp;
2424
2425 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2426
2427 /* All connection failure handling is taken care of by the
2428 * hci_le_conn_failed function which is triggered by the HCI
2429 * request completion callbacks used for connecting.
2430 */
2431 if (status)
2432 return;
2433
2434 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2435 if (!cp)
2436 return;
2437
2438 hci_dev_lock(hdev);
2439
2440 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2441 cp->own_address_type, cp->filter_policy);
2442
2443 hci_dev_unlock(hdev);
2444 }
2445
2446 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2447 {
2448 struct hci_cp_le_ext_create_conn *cp;
2449
2450 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2451
2452 /* All connection failure handling is taken care of by the
2453 * hci_le_conn_failed function which is triggered by the HCI
2454 * request completion callbacks used for connecting.
2455 */
2456 if (status)
2457 return;
2458
2459 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2460 if (!cp)
2461 return;
2462
2463 hci_dev_lock(hdev);
2464
2465 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2466 cp->own_addr_type, cp->filter_policy);
2467
2468 hci_dev_unlock(hdev);
2469 }
2470
2471 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2472 {
2473 struct hci_cp_le_read_remote_features *cp;
2474 struct hci_conn *conn;
2475
2476 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2477
2478 if (!status)
2479 return;
2480
2481 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2482 if (!cp)
2483 return;
2484
2485 hci_dev_lock(hdev);
2486
2487 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2488 if (conn) {
2489 if (conn->state == BT_CONFIG) {
2490 hci_connect_cfm(conn, status);
2491 hci_conn_drop(conn);
2492 }
2493 }
2494
2495 hci_dev_unlock(hdev);
2496 }
2497
2498 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2499 {
2500 struct hci_cp_le_start_enc *cp;
2501 struct hci_conn *conn;
2502
2503 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2504
2505 if (!status)
2506 return;
2507
2508 hci_dev_lock(hdev);
2509
2510 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2511 if (!cp)
2512 goto unlock;
2513
2514 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2515 if (!conn)
2516 goto unlock;
2517
2518 if (conn->state != BT_CONNECTED)
2519 goto unlock;
2520
2521 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2522 hci_conn_drop(conn);
2523
2524 unlock:
2525 hci_dev_unlock(hdev);
2526 }
2527
2528 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2529 {
2530 struct hci_cp_switch_role *cp;
2531 struct hci_conn *conn;
2532
2533 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2534
2535 if (!status)
2536 return;
2537
2538 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2539 if (!cp)
2540 return;
2541
2542 hci_dev_lock(hdev);
2543
2544 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2545 if (conn)
2546 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2547
2548 hci_dev_unlock(hdev);
2549 }
2550
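/* Inquiry Complete event: wake up anyone waiting on HCI_INQUIRY and, when
 * mgmt is in use, either move on to resolving pending names or mark
 * discovery as stopped (taking a possibly still running LE scan into
 * account).
 */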
2551 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2552 {
2553 __u8 status = *((__u8 *) skb->data);
2554 struct discovery_state *discov = &hdev->discovery;
2555 struct inquiry_entry *e;
2556
2557 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2558
2559 hci_conn_check_pending(hdev);
2560
2561 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2562 return;
2563
2564 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2565 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2566
2567 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2568 return;
2569
2570 hci_dev_lock(hdev);
2571
2572 if (discov->state != DISCOVERY_FINDING)
2573 goto unlock;
2574
2575 if (list_empty(&discov->resolve)) {
2576 /* When BR/EDR inquiry is active and no LE scanning is in
2577 * progress, then change discovery state to indicate completion.
2578 *
2579 * When running LE scanning and BR/EDR inquiry simultaneously
2580 * and the LE scan already finished, then change the discovery
2581 * state to indicate completion.
2582 */
2583 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2584 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2585 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2586 goto unlock;
2587 }
2588
2589 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2590 if (e && hci_resolve_name(hdev, e) == 0) {
2591 e->name_state = NAME_PENDING;
2592 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2593 } else {
2594 /* When BR/EDR inquiry is active and no LE scanning is in
2595 * progress, then change discovery state to indicate completion.
2596 *
2597 * When running LE scanning and BR/EDR inquiry simultaneously
2598 * and the LE scan already finished, then change the discovery
2599 * state to indicate completion.
2600 */
2601 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2602 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2603 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2604 }
2605
2606 unlock:
2607 hci_dev_unlock(hdev);
2608 }
2609
2610 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2611 {
2612 struct inquiry_data data;
2613 struct inquiry_info *info = (void *) (skb->data + 1);
2614 int num_rsp = *((__u8 *) skb->data);
2615
2616 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2617
2618 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2619 return;
2620
2621 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2622 return;
2623
2624 hci_dev_lock(hdev);
2625
2626 for (; num_rsp; num_rsp--, info++) {
2627 u32 flags;
2628
2629 bacpy(&data.bdaddr, &info->bdaddr);
2630 data.pscan_rep_mode = info->pscan_rep_mode;
2631 data.pscan_period_mode = info->pscan_period_mode;
2632 data.pscan_mode = info->pscan_mode;
2633 memcpy(data.dev_class, info->dev_class, 3);
2634 data.clock_offset = info->clock_offset;
2635 data.rssi = HCI_RSSI_INVALID;
2636 data.ssp_mode = 0x00;
2637
2638 flags = hci_inquiry_cache_update(hdev, &data, false);
2639
2640 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2641 info->dev_class, HCI_RSSI_INVALID,
2642 flags, NULL, 0, NULL, 0);
2643 }
2644
2645 hci_dev_unlock(hdev);
2646 }
2647
2648 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2649 {
2650 struct hci_ev_conn_complete *ev = (void *) skb->data;
2651 struct hci_conn *conn;
2652
2653 BT_DBG("%s", hdev->name);
2654
2655 hci_dev_lock(hdev);
2656
2657 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2658 if (!conn) {
2659 		/* The connection may not exist if it was auto-connected. Check
2660 		 * the BR/EDR accept list to see if this device is allowed to
2661 		 * auto-connect, and if the link is an ACL type, create the
2662 		 * connection object automatically.
2663 		 *
2664 		 * Auto-connect will only occur if the event filter is
2665 		 * programmed with a given address. Right now, the event filter
2666 		 * is only used during suspend.
2667 		 */
2668 if (ev->link_type == ACL_LINK &&
2669 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2670 &ev->bdaddr,
2671 BDADDR_BREDR)) {
2672 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2673 HCI_ROLE_SLAVE);
2674 if (!conn) {
2675 bt_dev_err(hdev, "no memory for new conn");
2676 goto unlock;
2677 }
2678 } else {
2679 if (ev->link_type != SCO_LINK)
2680 goto unlock;
2681
2682 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2683 &ev->bdaddr);
2684 if (!conn)
2685 goto unlock;
2686
2687 conn->type = SCO_LINK;
2688 }
2689 }
2690
2691 if (!ev->status) {
2692 conn->handle = __le16_to_cpu(ev->handle);
2693
2694 if (conn->type == ACL_LINK) {
2695 conn->state = BT_CONFIG;
2696 hci_conn_hold(conn);
2697
2698 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2699 !hci_find_link_key(hdev, &ev->bdaddr))
2700 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2701 else
2702 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2703 } else
2704 conn->state = BT_CONNECTED;
2705
2706 hci_debugfs_create_conn(conn);
2707 hci_conn_add_sysfs(conn);
2708
2709 if (test_bit(HCI_AUTH, &hdev->flags))
2710 set_bit(HCI_CONN_AUTH, &conn->flags);
2711
2712 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2713 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2714
2715 /* Get remote features */
2716 if (conn->type == ACL_LINK) {
2717 struct hci_cp_read_remote_features cp;
2718 cp.handle = ev->handle;
2719 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2720 sizeof(cp), &cp);
2721
2722 hci_req_update_scan(hdev);
2723 }
2724
2725 /* Set packet type for incoming connection */
2726 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2727 struct hci_cp_change_conn_ptype cp;
2728 cp.handle = ev->handle;
2729 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2730 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2731 &cp);
2732 }
2733 } else {
2734 conn->state = BT_CLOSED;
2735 if (conn->type == ACL_LINK)
2736 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2737 conn->dst_type, ev->status);
2738 }
2739
2740 if (conn->type == ACL_LINK)
2741 hci_sco_setup(conn, ev->status);
2742
2743 if (ev->status) {
2744 hci_connect_cfm(conn, ev->status);
2745 hci_conn_del(conn);
2746 } else if (ev->link_type == SCO_LINK) {
2747 switch (conn->setting & SCO_AIRMODE_MASK) {
2748 case SCO_AIRMODE_CVSD:
2749 if (hdev->notify)
2750 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2751 break;
2752 }
2753
2754 hci_connect_cfm(conn, ev->status);
2755 }
2756
2757 unlock:
2758 hci_dev_unlock(hdev);
2759
2760 hci_conn_check_pending(hdev);
2761 }
2762
2763 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2764 {
2765 struct hci_cp_reject_conn_req cp;
2766
2767 bacpy(&cp.bdaddr, bdaddr);
2768 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2769 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2770 }
2771
2772 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2773 {
2774 struct hci_ev_conn_request *ev = (void *) skb->data;
2775 int mask = hdev->link_mode;
2776 struct inquiry_entry *ie;
2777 struct hci_conn *conn;
2778 __u8 flags = 0;
2779
2780 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2781 ev->link_type);
2782
2783 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2784 &flags);
2785
2786 if (!(mask & HCI_LM_ACCEPT)) {
2787 hci_reject_conn(hdev, &ev->bdaddr);
2788 return;
2789 }
2790
2791 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2792 BDADDR_BREDR)) {
2793 hci_reject_conn(hdev, &ev->bdaddr);
2794 return;
2795 }
2796
2797 /* Require HCI_CONNECTABLE or an accept list entry to accept the
2798 * connection. These features are only touched through mgmt so
2799 * only do the checks if HCI_MGMT is set.
2800 */
2801 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2802 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2803 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2804 BDADDR_BREDR)) {
2805 hci_reject_conn(hdev, &ev->bdaddr);
2806 return;
2807 }
2808
2809 /* Connection accepted */
2810
2811 hci_dev_lock(hdev);
2812
2813 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2814 if (ie)
2815 memcpy(ie->data.dev_class, ev->dev_class, 3);
2816
2817 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2818 &ev->bdaddr);
2819 if (!conn) {
2820 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2821 HCI_ROLE_SLAVE);
2822 if (!conn) {
2823 bt_dev_err(hdev, "no memory for new connection");
2824 hci_dev_unlock(hdev);
2825 return;
2826 }
2827 }
2828
2829 memcpy(conn->dev_class, ev->dev_class, 3);
2830
2831 hci_dev_unlock(hdev);
2832
2833 if (ev->link_type == ACL_LINK ||
2834 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2835 struct hci_cp_accept_conn_req cp;
2836 conn->state = BT_CONNECT;
2837
2838 bacpy(&cp.bdaddr, &ev->bdaddr);
2839
2840 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2841 cp.role = 0x00; /* Become central */
2842 else
2843 cp.role = 0x01; /* Remain peripheral */
2844
2845 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2846 } else if (!(flags & HCI_PROTO_DEFER)) {
2847 struct hci_cp_accept_sync_conn_req cp;
2848 conn->state = BT_CONNECT;
2849
2850 bacpy(&cp.bdaddr, &ev->bdaddr);
2851 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2852
2853 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2854 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2855 cp.max_latency = cpu_to_le16(0xffff);
2856 cp.content_format = cpu_to_le16(hdev->voice_setting);
2857 cp.retrans_effort = 0xff;
2858
2859 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2860 &cp);
2861 } else {
2862 conn->state = BT_CONNECT2;
2863 hci_connect_cfm(conn, 0);
2864 }
2865 }
2866
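/* Map an HCI disconnect reason code onto the corresponding mgmt disconnect
 * reason reported to userspace.
 */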
2867 static u8 hci_to_mgmt_reason(u8 err)
2868 {
2869 switch (err) {
2870 case HCI_ERROR_CONNECTION_TIMEOUT:
2871 return MGMT_DEV_DISCONN_TIMEOUT;
2872 case HCI_ERROR_REMOTE_USER_TERM:
2873 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2874 case HCI_ERROR_REMOTE_POWER_OFF:
2875 return MGMT_DEV_DISCONN_REMOTE;
2876 case HCI_ERROR_LOCAL_HOST_TERM:
2877 return MGMT_DEV_DISCONN_LOCAL_HOST;
2878 default:
2879 return MGMT_DEV_DISCONN_UNKNOWN;
2880 }
2881 }
2882
2883 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2884 {
2885 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2886 u8 reason;
2887 struct hci_conn_params *params;
2888 struct hci_conn *conn;
2889 bool mgmt_connected;
2890
2891 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2892
2893 hci_dev_lock(hdev);
2894
2895 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2896 if (!conn)
2897 goto unlock;
2898
2899 if (ev->status) {
2900 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2901 conn->dst_type, ev->status);
2902 goto unlock;
2903 }
2904
2905 conn->state = BT_CLOSED;
2906
2907 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2908
2909 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2910 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2911 else
2912 reason = hci_to_mgmt_reason(ev->reason);
2913
2914 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2915 reason, mgmt_connected);
2916
2917 if (conn->type == ACL_LINK) {
2918 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2919 hci_remove_link_key(hdev, &conn->dst);
2920
2921 hci_req_update_scan(hdev);
2922 }
2923
2924 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2925 if (params) {
2926 switch (params->auto_connect) {
2927 case HCI_AUTO_CONN_LINK_LOSS:
2928 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2929 break;
2930 fallthrough;
2931
2932 case HCI_AUTO_CONN_DIRECT:
2933 case HCI_AUTO_CONN_ALWAYS:
2934 list_del_init(¶ms->action);
2935 list_add(¶ms->action, &hdev->pend_le_conns);
2936 hci_update_background_scan(hdev);
2937 break;
2938
2939 default:
2940 break;
2941 }
2942 }
2943
2944 hci_disconn_cfm(conn, ev->reason);
2945
2946 /* The suspend notifier is waiting for all devices to disconnect so
2947 * clear the bit from pending tasks and inform the wait queue.
2948 */
2949 if (list_empty(&hdev->conn_hash.list) &&
2950 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
2951 wake_up(&hdev->suspend_wait_q);
2952 }
2953
2954 /* Re-enable advertising if necessary, since it might
2955 * have been disabled by the connection. From the
2956 * HCI_LE_Set_Advertise_Enable command description in
2957 * the core specification (v4.0):
2958 * "The Controller shall continue advertising until the Host
2959 * issues an LE_Set_Advertise_Enable command with
2960 * Advertising_Enable set to 0x00 (Advertising is disabled)
2961 * or until a connection is created or until the Advertising
2962 * is timed out due to Directed Advertising."
2963 */
2964 if (conn->type == LE_LINK) {
2965 hdev->cur_adv_instance = conn->adv_instance;
2966 hci_req_reenable_advertising(hdev);
2967 }
2968
2969 hci_conn_del(conn);
2970
2971 unlock:
2972 hci_dev_unlock(hdev);
2973 }
2974
2975 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2976 {
2977 struct hci_ev_auth_complete *ev = (void *) skb->data;
2978 struct hci_conn *conn;
2979
2980 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2981
2982 hci_dev_lock(hdev);
2983
2984 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2985 if (!conn)
2986 goto unlock;
2987
2988 if (!ev->status) {
2989 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2990
2991 if (!hci_conn_ssp_enabled(conn) &&
2992 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2993 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2994 } else {
2995 set_bit(HCI_CONN_AUTH, &conn->flags);
2996 conn->sec_level = conn->pending_sec_level;
2997 }
2998 } else {
2999 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3000 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3001
3002 mgmt_auth_failed(conn, ev->status);
3003 }
3004
3005 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3006 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3007
3008 if (conn->state == BT_CONFIG) {
3009 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3010 struct hci_cp_set_conn_encrypt cp;
3011 cp.handle = ev->handle;
3012 cp.encrypt = 0x01;
3013 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3014 &cp);
3015 } else {
3016 conn->state = BT_CONNECTED;
3017 hci_connect_cfm(conn, ev->status);
3018 hci_conn_drop(conn);
3019 }
3020 } else {
3021 hci_auth_cfm(conn, ev->status);
3022
3023 hci_conn_hold(conn);
3024 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3025 hci_conn_drop(conn);
3026 }
3027
3028 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3029 if (!ev->status) {
3030 struct hci_cp_set_conn_encrypt cp;
3031 cp.handle = ev->handle;
3032 cp.encrypt = 0x01;
3033 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3034 &cp);
3035 } else {
3036 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3037 hci_encrypt_cfm(conn, ev->status);
3038 }
3039 }
3040
3041 unlock:
3042 hci_dev_unlock(hdev);
3043 }
3044
3045 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3046 {
3047 struct hci_ev_remote_name *ev = (void *) skb->data;
3048 struct hci_conn *conn;
3049
3050 BT_DBG("%s", hdev->name);
3051
3052 hci_conn_check_pending(hdev);
3053
3054 hci_dev_lock(hdev);
3055
3056 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3057
3058 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3059 goto check_auth;
3060
3061 if (ev->status == 0)
3062 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3063 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3064 else
3065 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3066
3067 check_auth:
3068 if (!conn)
3069 goto unlock;
3070
3071 if (!hci_outgoing_auth_needed(hdev, conn))
3072 goto unlock;
3073
3074 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3075 struct hci_cp_auth_requested cp;
3076
3077 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3078
3079 cp.handle = __cpu_to_le16(conn->handle);
3080 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3081 }
3082
3083 unlock:
3084 hci_dev_unlock(hdev);
3085 }
3086
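/* Completion callback for HCI_Read_Encryption_Key_Size. On failure the key
 * size is conservatively treated as 0, which forces a disconnection when
 * the result is propagated through hci_encrypt_cfm().
 */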
3087 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3088 u16 opcode, struct sk_buff *skb)
3089 {
3090 const struct hci_rp_read_enc_key_size *rp;
3091 struct hci_conn *conn;
3092 u16 handle;
3093
3094 BT_DBG("%s status 0x%02x", hdev->name, status);
3095
3096 if (!skb || skb->len < sizeof(*rp)) {
3097 bt_dev_err(hdev, "invalid read key size response");
3098 return;
3099 }
3100
3101 rp = (void *)skb->data;
3102 handle = le16_to_cpu(rp->handle);
3103
3104 hci_dev_lock(hdev);
3105
3106 conn = hci_conn_hash_lookup_handle(hdev, handle);
3107 if (!conn)
3108 goto unlock;
3109
3110 /* While unexpected, the read_enc_key_size command may fail. The most
3111 * secure approach is to then assume the key size is 0 to force a
3112 * disconnection.
3113 */
3114 if (rp->status) {
3115 bt_dev_err(hdev, "failed to read key size for handle %u",
3116 handle);
3117 conn->enc_key_size = 0;
3118 } else {
3119 conn->enc_key_size = rp->key_size;
3120 }
3121
3122 hci_encrypt_cfm(conn, 0);
3123
3124 unlock:
3125 hci_dev_unlock(hdev);
3126 }
3127
3128 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3129 {
3130 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3131 struct hci_conn *conn;
3132
3133 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3134
3135 hci_dev_lock(hdev);
3136
3137 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3138 if (!conn)
3139 goto unlock;
3140
3141 if (!ev->status) {
3142 if (ev->encrypt) {
3143 /* Encryption implies authentication */
3144 set_bit(HCI_CONN_AUTH, &conn->flags);
3145 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3146 conn->sec_level = conn->pending_sec_level;
3147
3148 /* P-256 authentication key implies FIPS */
3149 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3150 set_bit(HCI_CONN_FIPS, &conn->flags);
3151
3152 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3153 conn->type == LE_LINK)
3154 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3155 } else {
3156 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3157 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3158 }
3159 }
3160
3161 /* We should disregard the current RPA and generate a new one
3162 * whenever the encryption procedure fails.
3163 */
3164 if (ev->status && conn->type == LE_LINK) {
3165 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3166 hci_adv_instances_set_rpa_expired(hdev, true);
3167 }
3168
3169 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3170
3171 /* Check link security requirements are met */
3172 if (!hci_conn_check_link_mode(conn))
3173 ev->status = HCI_ERROR_AUTH_FAILURE;
3174
3175 if (ev->status && conn->state == BT_CONNECTED) {
3176 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3177 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3178
3179 /* Notify upper layers so they can cleanup before
3180 * disconnecting.
3181 */
3182 hci_encrypt_cfm(conn, ev->status);
3183 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3184 hci_conn_drop(conn);
3185 goto unlock;
3186 }
3187
3188 /* Try reading the encryption key size for encrypted ACL links */
3189 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3190 struct hci_cp_read_enc_key_size cp;
3191 struct hci_request req;
3192
3193 /* Only send HCI_Read_Encryption_Key_Size if the
3194 * controller really supports it. If it doesn't, assume
3195 * the default size (16).
3196 */
3197 if (!(hdev->commands[20] & 0x10)) {
3198 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3199 goto notify;
3200 }
3201
3202 hci_req_init(&req, hdev);
3203
3204 cp.handle = cpu_to_le16(conn->handle);
3205 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3206
3207 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3208 bt_dev_err(hdev, "sending read key size failed");
3209 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3210 goto notify;
3211 }
3212
3213 goto unlock;
3214 }
3215
3216 	/* Set the default Authenticated Payload Timeout once the link is
3217 	 * established. As per Core Spec v5.0, Vol 2, Part B, Section 3.3,
3218 	 * the HCI_Write_Authenticated_Payload_Timeout command should be
3219 	 * sent when the link is active and encryption is enabled. The
3220 	 * connection can be LE or ACL, the controller must support LMP
3221 	 * Ping (or LE Ping for LE links), and the link must use AES-CCM.
3222 	 */
3223 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3224 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3225 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3226 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3227 struct hci_cp_write_auth_payload_to cp;
3228
3229 cp.handle = cpu_to_le16(conn->handle);
3230 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3231 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3232 sizeof(cp), &cp);
3233 }
3234
3235 notify:
3236 hci_encrypt_cfm(conn, ev->status);
3237
3238 unlock:
3239 hci_dev_unlock(hdev);
3240 }
3241
3242 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3243 struct sk_buff *skb)
3244 {
3245 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3246 struct hci_conn *conn;
3247
3248 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3249
3250 hci_dev_lock(hdev);
3251
3252 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3253 if (conn) {
3254 if (!ev->status)
3255 set_bit(HCI_CONN_SECURE, &conn->flags);
3256
3257 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3258
3259 hci_key_change_cfm(conn, ev->status);
3260 }
3261
3262 hci_dev_unlock(hdev);
3263 }
3264
3265 static void hci_remote_features_evt(struct hci_dev *hdev,
3266 struct sk_buff *skb)
3267 {
3268 struct hci_ev_remote_features *ev = (void *) skb->data;
3269 struct hci_conn *conn;
3270
3271 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3272
3273 hci_dev_lock(hdev);
3274
3275 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3276 if (!conn)
3277 goto unlock;
3278
3279 if (!ev->status)
3280 memcpy(conn->features[0], ev->features, 8);
3281
3282 if (conn->state != BT_CONFIG)
3283 goto unlock;
3284
3285 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3286 lmp_ext_feat_capable(conn)) {
3287 struct hci_cp_read_remote_ext_features cp;
3288 cp.handle = ev->handle;
3289 cp.page = 0x01;
3290 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3291 sizeof(cp), &cp);
3292 goto unlock;
3293 }
3294
3295 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3296 struct hci_cp_remote_name_req cp;
3297 memset(&cp, 0, sizeof(cp));
3298 bacpy(&cp.bdaddr, &conn->dst);
3299 cp.pscan_rep_mode = 0x02;
3300 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3301 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3302 mgmt_device_connected(hdev, conn, NULL, 0);
3303
3304 if (!hci_outgoing_auth_needed(hdev, conn)) {
3305 conn->state = BT_CONNECTED;
3306 hci_connect_cfm(conn, ev->status);
3307 hci_conn_drop(conn);
3308 }
3309
3310 unlock:
3311 hci_dev_unlock(hdev);
3312 }
3313
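/* Process the Num_HCI_Command_Packets field carried by Command Complete
 * and Command Status events. The stack sends one command at a time, so any
 * non-zero value re-opens the command window; a zero value arms the ncmd
 * timer, presumably as a safeguard against controllers that never report a
 * non-zero count again.
 */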
3314 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3315 {
3316 cancel_delayed_work(&hdev->cmd_timer);
3317
3318 if (!test_bit(HCI_RESET, &hdev->flags)) {
3319 if (ncmd) {
3320 cancel_delayed_work(&hdev->ncmd_timer);
3321 atomic_set(&hdev->cmd_cnt, 1);
3322 } else {
3323 schedule_delayed_work(&hdev->ncmd_timer,
3324 HCI_NCMD_TIMEOUT);
3325 }
3326 }
3327 }
3328
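/* Dispatch a Command Complete event to the opcode-specific hci_cc_*
 * handler, update the command window and complete any request pending on
 * this opcode.
 */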
3329 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3330 u16 *opcode, u8 *status,
3331 hci_req_complete_t *req_complete,
3332 hci_req_complete_skb_t *req_complete_skb)
3333 {
3334 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3335
3336 *opcode = __le16_to_cpu(ev->opcode);
3337 *status = skb->data[sizeof(*ev)];
3338
3339 skb_pull(skb, sizeof(*ev));
3340
3341 switch (*opcode) {
3342 case HCI_OP_INQUIRY_CANCEL:
3343 hci_cc_inquiry_cancel(hdev, skb, status);
3344 break;
3345
3346 case HCI_OP_PERIODIC_INQ:
3347 hci_cc_periodic_inq(hdev, skb);
3348 break;
3349
3350 case HCI_OP_EXIT_PERIODIC_INQ:
3351 hci_cc_exit_periodic_inq(hdev, skb);
3352 break;
3353
3354 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3355 hci_cc_remote_name_req_cancel(hdev, skb);
3356 break;
3357
3358 case HCI_OP_ROLE_DISCOVERY:
3359 hci_cc_role_discovery(hdev, skb);
3360 break;
3361
3362 case HCI_OP_READ_LINK_POLICY:
3363 hci_cc_read_link_policy(hdev, skb);
3364 break;
3365
3366 case HCI_OP_WRITE_LINK_POLICY:
3367 hci_cc_write_link_policy(hdev, skb);
3368 break;
3369
3370 case HCI_OP_READ_DEF_LINK_POLICY:
3371 hci_cc_read_def_link_policy(hdev, skb);
3372 break;
3373
3374 case HCI_OP_WRITE_DEF_LINK_POLICY:
3375 hci_cc_write_def_link_policy(hdev, skb);
3376 break;
3377
3378 case HCI_OP_RESET:
3379 hci_cc_reset(hdev, skb);
3380 break;
3381
3382 case HCI_OP_READ_STORED_LINK_KEY:
3383 hci_cc_read_stored_link_key(hdev, skb);
3384 break;
3385
3386 case HCI_OP_DELETE_STORED_LINK_KEY:
3387 hci_cc_delete_stored_link_key(hdev, skb);
3388 break;
3389
3390 case HCI_OP_WRITE_LOCAL_NAME:
3391 hci_cc_write_local_name(hdev, skb);
3392 break;
3393
3394 case HCI_OP_READ_LOCAL_NAME:
3395 hci_cc_read_local_name(hdev, skb);
3396 break;
3397
3398 case HCI_OP_WRITE_AUTH_ENABLE:
3399 hci_cc_write_auth_enable(hdev, skb);
3400 break;
3401
3402 case HCI_OP_WRITE_ENCRYPT_MODE:
3403 hci_cc_write_encrypt_mode(hdev, skb);
3404 break;
3405
3406 case HCI_OP_WRITE_SCAN_ENABLE:
3407 hci_cc_write_scan_enable(hdev, skb);
3408 break;
3409
3410 case HCI_OP_SET_EVENT_FLT:
3411 hci_cc_set_event_filter(hdev, skb);
3412 break;
3413
3414 case HCI_OP_READ_CLASS_OF_DEV:
3415 hci_cc_read_class_of_dev(hdev, skb);
3416 break;
3417
3418 case HCI_OP_WRITE_CLASS_OF_DEV:
3419 hci_cc_write_class_of_dev(hdev, skb);
3420 break;
3421
3422 case HCI_OP_READ_VOICE_SETTING:
3423 hci_cc_read_voice_setting(hdev, skb);
3424 break;
3425
3426 case HCI_OP_WRITE_VOICE_SETTING:
3427 hci_cc_write_voice_setting(hdev, skb);
3428 break;
3429
3430 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3431 hci_cc_read_num_supported_iac(hdev, skb);
3432 break;
3433
3434 case HCI_OP_WRITE_SSP_MODE:
3435 hci_cc_write_ssp_mode(hdev, skb);
3436 break;
3437
3438 case HCI_OP_WRITE_SC_SUPPORT:
3439 hci_cc_write_sc_support(hdev, skb);
3440 break;
3441
3442 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3443 hci_cc_read_auth_payload_timeout(hdev, skb);
3444 break;
3445
3446 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3447 hci_cc_write_auth_payload_timeout(hdev, skb);
3448 break;
3449
3450 case HCI_OP_READ_LOCAL_VERSION:
3451 hci_cc_read_local_version(hdev, skb);
3452 break;
3453
3454 case HCI_OP_READ_LOCAL_COMMANDS:
3455 hci_cc_read_local_commands(hdev, skb);
3456 break;
3457
3458 case HCI_OP_READ_LOCAL_FEATURES:
3459 hci_cc_read_local_features(hdev, skb);
3460 break;
3461
3462 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3463 hci_cc_read_local_ext_features(hdev, skb);
3464 break;
3465
3466 case HCI_OP_READ_BUFFER_SIZE:
3467 hci_cc_read_buffer_size(hdev, skb);
3468 break;
3469
3470 case HCI_OP_READ_BD_ADDR:
3471 hci_cc_read_bd_addr(hdev, skb);
3472 break;
3473
3474 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3475 hci_cc_read_local_pairing_opts(hdev, skb);
3476 break;
3477
3478 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3479 hci_cc_read_page_scan_activity(hdev, skb);
3480 break;
3481
3482 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3483 hci_cc_write_page_scan_activity(hdev, skb);
3484 break;
3485
3486 case HCI_OP_READ_PAGE_SCAN_TYPE:
3487 hci_cc_read_page_scan_type(hdev, skb);
3488 break;
3489
3490 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3491 hci_cc_write_page_scan_type(hdev, skb);
3492 break;
3493
3494 case HCI_OP_READ_DATA_BLOCK_SIZE:
3495 hci_cc_read_data_block_size(hdev, skb);
3496 break;
3497
3498 case HCI_OP_READ_FLOW_CONTROL_MODE:
3499 hci_cc_read_flow_control_mode(hdev, skb);
3500 break;
3501
3502 case HCI_OP_READ_LOCAL_AMP_INFO:
3503 hci_cc_read_local_amp_info(hdev, skb);
3504 break;
3505
3506 case HCI_OP_READ_CLOCK:
3507 hci_cc_read_clock(hdev, skb);
3508 break;
3509
3510 case HCI_OP_READ_INQ_RSP_TX_POWER:
3511 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3512 break;
3513
3514 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3515 hci_cc_read_def_err_data_reporting(hdev, skb);
3516 break;
3517
3518 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3519 hci_cc_write_def_err_data_reporting(hdev, skb);
3520 break;
3521
3522 case HCI_OP_PIN_CODE_REPLY:
3523 hci_cc_pin_code_reply(hdev, skb);
3524 break;
3525
3526 case HCI_OP_PIN_CODE_NEG_REPLY:
3527 hci_cc_pin_code_neg_reply(hdev, skb);
3528 break;
3529
3530 case HCI_OP_READ_LOCAL_OOB_DATA:
3531 hci_cc_read_local_oob_data(hdev, skb);
3532 break;
3533
3534 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3535 hci_cc_read_local_oob_ext_data(hdev, skb);
3536 break;
3537
3538 case HCI_OP_LE_READ_BUFFER_SIZE:
3539 hci_cc_le_read_buffer_size(hdev, skb);
3540 break;
3541
3542 case HCI_OP_LE_READ_LOCAL_FEATURES:
3543 hci_cc_le_read_local_features(hdev, skb);
3544 break;
3545
3546 case HCI_OP_LE_READ_ADV_TX_POWER:
3547 hci_cc_le_read_adv_tx_power(hdev, skb);
3548 break;
3549
3550 case HCI_OP_USER_CONFIRM_REPLY:
3551 hci_cc_user_confirm_reply(hdev, skb);
3552 break;
3553
3554 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3555 hci_cc_user_confirm_neg_reply(hdev, skb);
3556 break;
3557
3558 case HCI_OP_USER_PASSKEY_REPLY:
3559 hci_cc_user_passkey_reply(hdev, skb);
3560 break;
3561
3562 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3563 hci_cc_user_passkey_neg_reply(hdev, skb);
3564 break;
3565
3566 case HCI_OP_LE_SET_RANDOM_ADDR:
3567 hci_cc_le_set_random_addr(hdev, skb);
3568 break;
3569
3570 case HCI_OP_LE_SET_ADV_ENABLE:
3571 hci_cc_le_set_adv_enable(hdev, skb);
3572 break;
3573
3574 case HCI_OP_LE_SET_SCAN_PARAM:
3575 hci_cc_le_set_scan_param(hdev, skb);
3576 break;
3577
3578 case HCI_OP_LE_SET_SCAN_ENABLE:
3579 hci_cc_le_set_scan_enable(hdev, skb);
3580 break;
3581
3582 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3583 hci_cc_le_read_accept_list_size(hdev, skb);
3584 break;
3585
3586 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3587 hci_cc_le_clear_accept_list(hdev, skb);
3588 break;
3589
3590 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3591 hci_cc_le_add_to_accept_list(hdev, skb);
3592 break;
3593
3594 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3595 hci_cc_le_del_from_accept_list(hdev, skb);
3596 break;
3597
3598 case HCI_OP_LE_READ_SUPPORTED_STATES:
3599 hci_cc_le_read_supported_states(hdev, skb);
3600 break;
3601
3602 case HCI_OP_LE_READ_DEF_DATA_LEN:
3603 hci_cc_le_read_def_data_len(hdev, skb);
3604 break;
3605
3606 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3607 hci_cc_le_write_def_data_len(hdev, skb);
3608 break;
3609
3610 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3611 hci_cc_le_add_to_resolv_list(hdev, skb);
3612 break;
3613
3614 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3615 hci_cc_le_del_from_resolv_list(hdev, skb);
3616 break;
3617
3618 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3619 hci_cc_le_clear_resolv_list(hdev, skb);
3620 break;
3621
3622 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3623 hci_cc_le_read_resolv_list_size(hdev, skb);
3624 break;
3625
3626 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3627 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3628 break;
3629
3630 case HCI_OP_LE_READ_MAX_DATA_LEN:
3631 hci_cc_le_read_max_data_len(hdev, skb);
3632 break;
3633
3634 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3635 hci_cc_write_le_host_supported(hdev, skb);
3636 break;
3637
3638 case HCI_OP_LE_SET_ADV_PARAM:
3639 hci_cc_set_adv_param(hdev, skb);
3640 break;
3641
3642 case HCI_OP_READ_RSSI:
3643 hci_cc_read_rssi(hdev, skb);
3644 break;
3645
3646 case HCI_OP_READ_TX_POWER:
3647 hci_cc_read_tx_power(hdev, skb);
3648 break;
3649
3650 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3651 hci_cc_write_ssp_debug_mode(hdev, skb);
3652 break;
3653
3654 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3655 hci_cc_le_set_ext_scan_param(hdev, skb);
3656 break;
3657
3658 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3659 hci_cc_le_set_ext_scan_enable(hdev, skb);
3660 break;
3661
3662 case HCI_OP_LE_SET_DEFAULT_PHY:
3663 hci_cc_le_set_default_phy(hdev, skb);
3664 break;
3665
3666 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3667 hci_cc_le_read_num_adv_sets(hdev, skb);
3668 break;
3669
3670 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3671 hci_cc_set_ext_adv_param(hdev, skb);
3672 break;
3673
3674 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3675 hci_cc_le_set_ext_adv_enable(hdev, skb);
3676 break;
3677
3678 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3679 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3680 break;
3681
3682 case HCI_OP_LE_READ_TRANSMIT_POWER:
3683 hci_cc_le_read_transmit_power(hdev, skb);
3684 break;
3685
3686 default:
3687 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3688 break;
3689 }
3690
3691 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3692
3693 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3694 req_complete_skb);
3695
3696 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3697 bt_dev_err(hdev,
3698 "unexpected event for opcode 0x%4.4x", *opcode);
3699 return;
3700 }
3701
3702 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3703 queue_work(hdev->workqueue, &hdev->cmd_work);
3704 }
3705
3706 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3707 u16 *opcode, u8 *status,
3708 hci_req_complete_t *req_complete,
3709 hci_req_complete_skb_t *req_complete_skb)
3710 {
3711 struct hci_ev_cmd_status *ev = (void *) skb->data;
3712
3713 skb_pull(skb, sizeof(*ev));
3714
3715 *opcode = __le16_to_cpu(ev->opcode);
3716 *status = ev->status;
3717
3718 switch (*opcode) {
3719 case HCI_OP_INQUIRY:
3720 hci_cs_inquiry(hdev, ev->status);
3721 break;
3722
3723 case HCI_OP_CREATE_CONN:
3724 hci_cs_create_conn(hdev, ev->status);
3725 break;
3726
3727 case HCI_OP_DISCONNECT:
3728 hci_cs_disconnect(hdev, ev->status);
3729 break;
3730
3731 case HCI_OP_ADD_SCO:
3732 hci_cs_add_sco(hdev, ev->status);
3733 break;
3734
3735 case HCI_OP_AUTH_REQUESTED:
3736 hci_cs_auth_requested(hdev, ev->status);
3737 break;
3738
3739 case HCI_OP_SET_CONN_ENCRYPT:
3740 hci_cs_set_conn_encrypt(hdev, ev->status);
3741 break;
3742
3743 case HCI_OP_REMOTE_NAME_REQ:
3744 hci_cs_remote_name_req(hdev, ev->status);
3745 break;
3746
3747 case HCI_OP_READ_REMOTE_FEATURES:
3748 hci_cs_read_remote_features(hdev, ev->status);
3749 break;
3750
3751 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3752 hci_cs_read_remote_ext_features(hdev, ev->status);
3753 break;
3754
3755 case HCI_OP_SETUP_SYNC_CONN:
3756 hci_cs_setup_sync_conn(hdev, ev->status);
3757 break;
3758
3759 case HCI_OP_SNIFF_MODE:
3760 hci_cs_sniff_mode(hdev, ev->status);
3761 break;
3762
3763 case HCI_OP_EXIT_SNIFF_MODE:
3764 hci_cs_exit_sniff_mode(hdev, ev->status);
3765 break;
3766
3767 case HCI_OP_SWITCH_ROLE:
3768 hci_cs_switch_role(hdev, ev->status);
3769 break;
3770
3771 case HCI_OP_LE_CREATE_CONN:
3772 hci_cs_le_create_conn(hdev, ev->status);
3773 break;
3774
3775 case HCI_OP_LE_READ_REMOTE_FEATURES:
3776 hci_cs_le_read_remote_features(hdev, ev->status);
3777 break;
3778
3779 case HCI_OP_LE_START_ENC:
3780 hci_cs_le_start_enc(hdev, ev->status);
3781 break;
3782
3783 case HCI_OP_LE_EXT_CREATE_CONN:
3784 hci_cs_le_ext_create_conn(hdev, ev->status);
3785 break;
3786
3787 default:
3788 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3789 break;
3790 }
3791
3792 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3793
3794 	/* Indicate request completion if the command failed. Also, if
3795 	 * we're not waiting for a special event and we get a successful
3796 	 * command status, we should try to flag the request as completed
3797 	 * (since for this kind of command there will not be a Command
3798 	 * Complete event).
3799 	 */
3800 if (ev->status ||
3801 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3802 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3803 req_complete_skb);
3804
3805 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3806 bt_dev_err(hdev,
3807 "unexpected event for opcode 0x%4.4x", *opcode);
3808 return;
3809 }
3810
3811 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3812 queue_work(hdev->workqueue, &hdev->cmd_work);
3813 }
3814
3815 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3816 {
3817 struct hci_ev_hardware_error *ev = (void *) skb->data;
3818
3819 hdev->hw_error_code = ev->code;
3820
3821 queue_work(hdev->req_workqueue, &hdev->error_reset);
3822 }
3823
3824 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3825 {
3826 struct hci_ev_role_change *ev = (void *) skb->data;
3827 struct hci_conn *conn;
3828
3829 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3830
3831 hci_dev_lock(hdev);
3832
3833 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3834 if (conn) {
3835 if (!ev->status)
3836 conn->role = ev->role;
3837
3838 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3839
3840 hci_role_switch_cfm(conn, ev->status, ev->role);
3841 }
3842
3843 hci_dev_unlock(hdev);
3844 }
3845
3846 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3847 {
3848 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3849 int i;
3850
3851 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3852 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3853 return;
3854 }
3855
3856 if (skb->len < sizeof(*ev) ||
3857 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3858 BT_DBG("%s bad parameters", hdev->name);
3859 return;
3860 }
3861
3862 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3863
3864 for (i = 0; i < ev->num_hndl; i++) {
3865 struct hci_comp_pkts_info *info = &ev->handles[i];
3866 struct hci_conn *conn;
3867 __u16 handle, count;
3868
3869 handle = __le16_to_cpu(info->handle);
3870 count = __le16_to_cpu(info->count);
3871
3872 conn = hci_conn_hash_lookup_handle(hdev, handle);
3873 if (!conn)
3874 continue;
3875
3876 conn->sent -= count;
3877
3878 switch (conn->type) {
3879 case ACL_LINK:
3880 hdev->acl_cnt += count;
3881 if (hdev->acl_cnt > hdev->acl_pkts)
3882 hdev->acl_cnt = hdev->acl_pkts;
3883 break;
3884
3885 case LE_LINK:
3886 if (hdev->le_pkts) {
3887 hdev->le_cnt += count;
3888 if (hdev->le_cnt > hdev->le_pkts)
3889 hdev->le_cnt = hdev->le_pkts;
3890 } else {
3891 hdev->acl_cnt += count;
3892 if (hdev->acl_cnt > hdev->acl_pkts)
3893 hdev->acl_cnt = hdev->acl_pkts;
3894 }
3895 break;
3896
3897 case SCO_LINK:
3898 hdev->sco_cnt += count;
3899 if (hdev->sco_cnt > hdev->sco_pkts)
3900 hdev->sco_cnt = hdev->sco_pkts;
3901 break;
3902
3903 default:
3904 bt_dev_err(hdev, "unknown type %d conn %p",
3905 conn->type, conn);
3906 break;
3907 }
3908 }
3909
3910 queue_work(hdev->workqueue, &hdev->tx_work);
3911 }
3912
3913 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3914 __u16 handle)
3915 {
3916 struct hci_chan *chan;
3917
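	/* On AMP controllers the Number Of Completed Data Blocks event
	 * carries logical link handles, so the hci_conn has to be
	 * resolved through the hci_chan table rather than the
	 * connection hash.
	 */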
3918 switch (hdev->dev_type) {
3919 case HCI_PRIMARY:
3920 return hci_conn_hash_lookup_handle(hdev, handle);
3921 case HCI_AMP:
3922 chan = hci_chan_lookup_handle(hdev, handle);
3923 if (chan)
3924 return chan->conn;
3925 break;
3926 default:
3927 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3928 break;
3929 }
3930
3931 return NULL;
3932 }
3933
3934 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3935 {
3936 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3937 int i;
3938
3939 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3940 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3941 return;
3942 }
3943
3944 if (skb->len < sizeof(*ev) ||
3945 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3946 BT_DBG("%s bad parameters", hdev->name);
3947 return;
3948 }
3949
3950 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3951 ev->num_hndl);
3952
3953 for (i = 0; i < ev->num_hndl; i++) {
3954 struct hci_comp_blocks_info *info = &ev->handles[i];
3955 struct hci_conn *conn = NULL;
3956 __u16 handle, block_count;
3957
3958 handle = __le16_to_cpu(info->handle);
3959 block_count = __le16_to_cpu(info->blocks);
3960
3961 conn = __hci_conn_lookup_handle(hdev, handle);
3962 if (!conn)
3963 continue;
3964
3965 conn->sent -= block_count;
3966
3967 switch (conn->type) {
3968 case ACL_LINK:
3969 case AMP_LINK:
3970 hdev->block_cnt += block_count;
3971 if (hdev->block_cnt > hdev->num_blocks)
3972 hdev->block_cnt = hdev->num_blocks;
3973 break;
3974
3975 default:
3976 bt_dev_err(hdev, "unknown type %d conn %p",
3977 conn->type, conn);
3978 break;
3979 }
3980 }
3981
3982 queue_work(hdev->workqueue, &hdev->tx_work);
3983 }
3984
3985 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3986 {
3987 struct hci_ev_mode_change *ev = (void *) skb->data;
3988 struct hci_conn *conn;
3989
3990 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3991
3992 hci_dev_lock(hdev);
3993
3994 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3995 if (conn) {
3996 conn->mode = ev->mode;
3997
3998 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3999 &conn->flags)) {
4000 if (conn->mode == HCI_CM_ACTIVE)
4001 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4002 else
4003 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4004 }
4005
4006 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4007 hci_sco_setup(conn, ev->status);
4008 }
4009
4010 hci_dev_unlock(hdev);
4011 }
4012
4013 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4014 {
4015 struct hci_ev_pin_code_req *ev = (void *) skb->data;
4016 struct hci_conn *conn;
4017
4018 BT_DBG("%s", hdev->name);
4019
4020 hci_dev_lock(hdev);
4021
4022 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4023 if (!conn)
4024 goto unlock;
4025
4026 if (conn->state == BT_CONNECTED) {
4027 hci_conn_hold(conn);
4028 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4029 hci_conn_drop(conn);
4030 }
4031
4032 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4033 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4034 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4035 sizeof(ev->bdaddr), &ev->bdaddr);
4036 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4037 u8 secure;
4038
4039 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4040 secure = 1;
4041 else
4042 secure = 0;
4043
4044 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4045 }
4046
4047 unlock:
4048 hci_dev_unlock(hdev);
4049 }
4050
4051 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4052 {
4053 if (key_type == HCI_LK_CHANGED_COMBINATION)
4054 return;
4055
4056 conn->pin_length = pin_len;
4057 conn->key_type = key_type;
4058
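	/* Map the link key type to the security level it can satisfy:
	 * unit and debug keys never raise the level, a combination key
	 * counts as high security only with a 16 digit PIN, and an
	 * authenticated P-256 key satisfies the FIPS level.
	 */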
4059 switch (key_type) {
4060 case HCI_LK_LOCAL_UNIT:
4061 case HCI_LK_REMOTE_UNIT:
4062 case HCI_LK_DEBUG_COMBINATION:
4063 return;
4064 case HCI_LK_COMBINATION:
4065 if (pin_len == 16)
4066 conn->pending_sec_level = BT_SECURITY_HIGH;
4067 else
4068 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4069 break;
4070 case HCI_LK_UNAUTH_COMBINATION_P192:
4071 case HCI_LK_UNAUTH_COMBINATION_P256:
4072 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4073 break;
4074 case HCI_LK_AUTH_COMBINATION_P192:
4075 conn->pending_sec_level = BT_SECURITY_HIGH;
4076 break;
4077 case HCI_LK_AUTH_COMBINATION_P256:
4078 conn->pending_sec_level = BT_SECURITY_FIPS;
4079 break;
4080 }
4081 }
4082
4083 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4084 {
4085 struct hci_ev_link_key_req *ev = (void *) skb->data;
4086 struct hci_cp_link_key_reply cp;
4087 struct hci_conn *conn;
4088 struct link_key *key;
4089
4090 BT_DBG("%s", hdev->name);
4091
4092 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4093 return;
4094
4095 hci_dev_lock(hdev);
4096
4097 key = hci_find_link_key(hdev, &ev->bdaddr);
4098 if (!key) {
4099 BT_DBG("%s link key not found for %pMR", hdev->name,
4100 &ev->bdaddr);
4101 goto not_found;
4102 }
4103
4104 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4105 &ev->bdaddr);
4106
4107 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4108 if (conn) {
4109 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4110
4111 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4112 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4113 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4114 BT_DBG("%s ignoring unauthenticated key", hdev->name);
4115 goto not_found;
4116 }
4117
4118 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4119 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4120 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4121 BT_DBG("%s ignoring key unauthenticated for high security",
4122 hdev->name);
4123 goto not_found;
4124 }
4125
4126 conn_set_key(conn, key->type, key->pin_len);
4127 }
4128
4129 bacpy(&cp.bdaddr, &ev->bdaddr);
4130 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4131
4132 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4133
4134 hci_dev_unlock(hdev);
4135
4136 return;
4137
4138 not_found:
4139 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4140 hci_dev_unlock(hdev);
4141 }
4142
4143 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4144 {
4145 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4146 struct hci_conn *conn;
4147 struct link_key *key;
4148 bool persistent;
4149 u8 pin_len = 0;
4150
4151 BT_DBG("%s", hdev->name);
4152
4153 hci_dev_lock(hdev);
4154
4155 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4156 if (!conn)
4157 goto unlock;
4158
4159 hci_conn_hold(conn);
4160 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4161 hci_conn_drop(conn);
4162
4163 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4164 conn_set_key(conn, ev->key_type, conn->pin_length);
4165
4166 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4167 goto unlock;
4168
4169 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4170 ev->key_type, pin_len, &persistent);
4171 if (!key)
4172 goto unlock;
4173
4174 /* Update connection information since adding the key will have
4175 * fixed up the type in the case of changed combination keys.
4176 */
4177 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4178 conn_set_key(conn, key->type, key->pin_len);
4179
4180 mgmt_new_link_key(hdev, key, persistent);
4181
4182 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4183 * is set. If it's not set simply remove the key from the kernel
4184 * list (we've still notified user space about it but with
4185 * store_hint being 0).
4186 */
4187 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4188 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4189 list_del_rcu(&key->list);
4190 kfree_rcu(key, rcu);
4191 goto unlock;
4192 }
4193
4194 if (persistent)
4195 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4196 else
4197 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4198
4199 unlock:
4200 hci_dev_unlock(hdev);
4201 }
4202
4203 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4204 {
4205 struct hci_ev_clock_offset *ev = (void *) skb->data;
4206 struct hci_conn *conn;
4207
4208 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4209
4210 hci_dev_lock(hdev);
4211
4212 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4213 if (conn && !ev->status) {
4214 struct inquiry_entry *ie;
4215
4216 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4217 if (ie) {
4218 ie->data.clock_offset = ev->clock_offset;
4219 ie->timestamp = jiffies;
4220 }
4221 }
4222
4223 hci_dev_unlock(hdev);
4224 }
4225
4226 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4227 {
4228 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4229 struct hci_conn *conn;
4230
4231 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4232
4233 hci_dev_lock(hdev);
4234
4235 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4236 if (conn && !ev->status)
4237 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4238
4239 hci_dev_unlock(hdev);
4240 }
4241
4242 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4243 {
4244 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4245 struct inquiry_entry *ie;
4246
4247 BT_DBG("%s", hdev->name);
4248
4249 hci_dev_lock(hdev);
4250
4251 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4252 if (ie) {
4253 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4254 ie->timestamp = jiffies;
4255 }
4256
4257 hci_dev_unlock(hdev);
4258 }
4259
4260 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4261 struct sk_buff *skb)
4262 {
4263 struct inquiry_data data;
4264 int num_rsp = *((__u8 *) skb->data);
4265
4266 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4267
4268 if (!num_rsp)
4269 return;
4270
4271 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4272 return;
4273
4274 hci_dev_lock(hdev);
4275
4276 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4277 struct inquiry_info_with_rssi_and_pscan_mode *info;
4278 info = (void *) (skb->data + 1);
4279
4280 if (skb->len < num_rsp * sizeof(*info) + 1)
4281 goto unlock;
4282
4283 for (; num_rsp; num_rsp--, info++) {
4284 u32 flags;
4285
4286 bacpy(&data.bdaddr, &info->bdaddr);
4287 data.pscan_rep_mode = info->pscan_rep_mode;
4288 data.pscan_period_mode = info->pscan_period_mode;
4289 data.pscan_mode = info->pscan_mode;
4290 memcpy(data.dev_class, info->dev_class, 3);
4291 data.clock_offset = info->clock_offset;
4292 data.rssi = info->rssi;
4293 data.ssp_mode = 0x00;
4294
4295 flags = hci_inquiry_cache_update(hdev, &data, false);
4296
4297 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4298 info->dev_class, info->rssi,
4299 flags, NULL, 0, NULL, 0);
4300 }
4301 } else {
4302 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4303
4304 if (skb->len < num_rsp * sizeof(*info) + 1)
4305 goto unlock;
4306
4307 for (; num_rsp; num_rsp--, info++) {
4308 u32 flags;
4309
4310 bacpy(&data.bdaddr, &info->bdaddr);
4311 data.pscan_rep_mode = info->pscan_rep_mode;
4312 data.pscan_period_mode = info->pscan_period_mode;
4313 data.pscan_mode = 0x00;
4314 memcpy(data.dev_class, info->dev_class, 3);
4315 data.clock_offset = info->clock_offset;
4316 data.rssi = info->rssi;
4317 data.ssp_mode = 0x00;
4318
4319 flags = hci_inquiry_cache_update(hdev, &data, false);
4320
4321 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4322 info->dev_class, info->rssi,
4323 flags, NULL, 0, NULL, 0);
4324 }
4325 }
4326
4327 unlock:
4328 hci_dev_unlock(hdev);
4329 }
4330
4331 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4332 struct sk_buff *skb)
4333 {
4334 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4335 struct hci_conn *conn;
4336
4337 BT_DBG("%s", hdev->name);
4338
4339 hci_dev_lock(hdev);
4340
4341 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4342 if (!conn)
4343 goto unlock;
4344
4345 if (ev->page < HCI_MAX_PAGES)
4346 memcpy(conn->features[ev->page], ev->features, 8);
4347
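	/* Features page 0x01 carries the remote host features, i.e. the
	 * host supported SSP and Secure Connections bits evaluated
	 * below.
	 */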
4348 if (!ev->status && ev->page == 0x01) {
4349 struct inquiry_entry *ie;
4350
4351 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4352 if (ie)
4353 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4354
4355 if (ev->features[0] & LMP_HOST_SSP) {
4356 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4357 } else {
4358 /* The Bluetooth specification mandates that
4359 * Extended Inquiry Results are only used when Secure
4360 * Simple Pairing is enabled, but some devices violate
4361 * this.
4362 *
4363 * To make these devices work, the internal SSP
4364 * enabled flag needs to be cleared if the remote host
4365 * features do not indicate SSP support */
4366 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4367 }
4368
4369 if (ev->features[0] & LMP_HOST_SC)
4370 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4371 }
4372
4373 if (conn->state != BT_CONFIG)
4374 goto unlock;
4375
4376 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4377 struct hci_cp_remote_name_req cp;
4378 memset(&cp, 0, sizeof(cp));
4379 bacpy(&cp.bdaddr, &conn->dst);
4380 cp.pscan_rep_mode = 0x02;
4381 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4382 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4383 mgmt_device_connected(hdev, conn, NULL, 0);
4384
4385 if (!hci_outgoing_auth_needed(hdev, conn)) {
4386 conn->state = BT_CONNECTED;
4387 hci_connect_cfm(conn, ev->status);
4388 hci_conn_drop(conn);
4389 }
4390
4391 unlock:
4392 hci_dev_unlock(hdev);
4393 }
4394
4395 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4396 struct sk_buff *skb)
4397 {
4398 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4399 struct hci_conn *conn;
4400
4401 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4402
4403 hci_dev_lock(hdev);
4404
4405 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4406 if (!conn) {
4407 if (ev->link_type == ESCO_LINK)
4408 goto unlock;
4409
4410 /* When the link type in the event indicates SCO connection
4411 * and lookup of the connection object fails, then check
4412 * if an eSCO connection object exists.
4413 *
4414 * The core limits the synchronous connections to either
4415 * SCO or eSCO. The eSCO connection is preferred and is
4416 * attempted first; until it is successfully established,
4417 * the link type will be hinted as eSCO.
4418 */
4419 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4420 if (!conn)
4421 goto unlock;
4422 }
4423
4424 switch (ev->status) {
4425 case 0x00:
4426 /* The synchronous connection complete event should only be
4427 * sent once per new connection. Receiving a successful
4428 * complete event when the connection status is already
4429 * BT_CONNECTED means that the device is misbehaving and sent
4430 * multiple complete event packets for the same new connection.
4431 *
4432 * Registering the device more than once can corrupt kernel
4433 * memory, hence upon detecting this invalid event, we report
4434 * an error and ignore the packet.
4435 */
4436 if (conn->state == BT_CONNECTED) {
4437 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4438 goto unlock;
4439 }
4440
4441 conn->handle = __le16_to_cpu(ev->handle);
4442 conn->state = BT_CONNECTED;
4443 conn->type = ev->link_type;
4444
4445 hci_debugfs_create_conn(conn);
4446 hci_conn_add_sysfs(conn);
4447 break;
4448
4449 case 0x10: /* Connection Accept Timeout */
4450 case 0x0d: /* Connection Rejected due to Limited Resources */
4451 case 0x11: /* Unsupported Feature or Parameter Value */
4452 case 0x1c: /* SCO interval rejected */
4453 case 0x1a: /* Unsupported Remote Feature */
4454 case 0x1e: /* Invalid LMP Parameters */
4455 case 0x1f: /* Unspecified error */
4456 case 0x20: /* Unsupported LMP Parameter value */
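		/* For these failure codes an outgoing eSCO setup is
		 * retried with a recomputed packet type mask before the
		 * connection is finally closed.
		 */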
4457 if (conn->out) {
4458 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4459 (hdev->esco_type & EDR_ESCO_MASK);
4460 if (hci_setup_sync(conn, conn->link->handle))
4461 goto unlock;
4462 }
4463 fallthrough;
4464
4465 default:
4466 conn->state = BT_CLOSED;
4467 break;
4468 }
4469
4470 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
4471
4472 switch (ev->air_mode) {
4473 case 0x02:
4474 if (hdev->notify)
4475 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4476 break;
4477 case 0x03:
4478 if (hdev->notify)
4479 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4480 break;
4481 }
4482
4483 hci_connect_cfm(conn, ev->status);
4484 if (ev->status)
4485 hci_conn_del(conn);
4486
4487 unlock:
4488 hci_dev_unlock(hdev);
4489 }
4490
4491 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4492 {
4493 size_t parsed = 0;
4494
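	/* EIR data is a sequence of length-prefixed structures; a zero
	 * length octet marks the end of the significant part.
	 */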
4495 while (parsed < eir_len) {
4496 u8 field_len = eir[0];
4497
4498 if (field_len == 0)
4499 return parsed;
4500
4501 parsed += field_len + 1;
4502 eir += field_len + 1;
4503 }
4504
4505 return eir_len;
4506 }
4507
4508 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4509 struct sk_buff *skb)
4510 {
4511 struct inquiry_data data;
4512 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4513 int num_rsp = *((__u8 *) skb->data);
4514 size_t eir_len;
4515
4516 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4517
4518 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4519 return;
4520
4521 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4522 return;
4523
4524 hci_dev_lock(hdev);
4525
4526 for (; num_rsp; num_rsp--, info++) {
4527 u32 flags;
4528 bool name_known;
4529
4530 bacpy(&data.bdaddr, &info->bdaddr);
4531 data.pscan_rep_mode = info->pscan_rep_mode;
4532 data.pscan_period_mode = info->pscan_period_mode;
4533 data.pscan_mode = 0x00;
4534 memcpy(data.dev_class, info->dev_class, 3);
4535 data.clock_offset = info->clock_offset;
4536 data.rssi = info->rssi;
4537 data.ssp_mode = 0x01;
4538
4539 if (hci_dev_test_flag(hdev, HCI_MGMT))
4540 name_known = eir_get_data(info->data,
4541 sizeof(info->data),
4542 EIR_NAME_COMPLETE, NULL);
4543 else
4544 name_known = true;
4545
4546 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4547
4548 eir_len = eir_get_length(info->data, sizeof(info->data));
4549
4550 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4551 info->dev_class, info->rssi,
4552 flags, info->data, eir_len, NULL, 0);
4553 }
4554
4555 hci_dev_unlock(hdev);
4556 }
4557
4558 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4559 struct sk_buff *skb)
4560 {
4561 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4562 struct hci_conn *conn;
4563
4564 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4565 __le16_to_cpu(ev->handle));
4566
4567 hci_dev_lock(hdev);
4568
4569 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4570 if (!conn)
4571 goto unlock;
4572
4573 /* For BR/EDR the necessary steps are taken through the
4574 * auth_complete event.
4575 */
4576 if (conn->type != LE_LINK)
4577 goto unlock;
4578
4579 if (!ev->status)
4580 conn->sec_level = conn->pending_sec_level;
4581
4582 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4583
4584 if (ev->status && conn->state == BT_CONNECTED) {
4585 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4586 hci_conn_drop(conn);
4587 goto unlock;
4588 }
4589
4590 if (conn->state == BT_CONFIG) {
4591 if (!ev->status)
4592 conn->state = BT_CONNECTED;
4593
4594 hci_connect_cfm(conn, ev->status);
4595 hci_conn_drop(conn);
4596 } else {
4597 hci_auth_cfm(conn, ev->status);
4598
4599 hci_conn_hold(conn);
4600 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4601 hci_conn_drop(conn);
4602 }
4603
4604 unlock:
4605 hci_dev_unlock(hdev);
4606 }
4607
4608 static u8 hci_get_auth_req(struct hci_conn *conn)
4609 {
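	/* Bit 0 of the authentication requirement encodes the MITM
	 * protection requirement; the remaining bits select the
	 * bonding type.
	 */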
4610 /* If remote requests no-bonding follow that lead */
4611 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4612 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4613 return conn->remote_auth | (conn->auth_type & 0x01);
4614
4615 /* If both remote and local have enough IO capabilities, require
4616 * MITM protection
4617 */
4618 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4619 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4620 return conn->remote_auth | 0x01;
4621
4622 /* No MITM protection possible so ignore remote requirement */
4623 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4624 }
4625
4626 static u8 bredr_oob_data_present(struct hci_conn *conn)
4627 {
4628 struct hci_dev *hdev = conn->hdev;
4629 struct oob_data *data;
4630
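	/* The return value is used as the OOB_Data_Present parameter of
	 * the IO Capability Request Reply: 0x00 no OOB data, 0x01 P-192
	 * data present, 0x02 P-256 data present.
	 */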
4631 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4632 if (!data)
4633 return 0x00;
4634
4635 if (bredr_sc_enabled(hdev)) {
4636 /* When Secure Connections is enabled, then just
4637 * return the present value stored with the OOB
4638 * data. The stored value contains the right present
4639 * information. However, it can only be trusted when
4640 * not in Secure Connections Only mode.
4641 */
4642 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4643 return data->present;
4644
4645 /* When Secure Connections Only mode is enabled, then
4646 * the P-256 values are required. If they are not
4647 * available, then do not declare that OOB data is
4648 * present.
4649 */
4650 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4651 !memcmp(data->hash256, ZERO_KEY, 16))
4652 return 0x00;
4653
4654 return 0x02;
4655 }
4656
4657 /* When Secure Connections is not enabled or actually
4658 * not supported by the hardware, then check if the
4659 * P-192 data values are present.
4660 */
4661 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4662 !memcmp(data->hash192, ZERO_KEY, 16))
4663 return 0x00;
4664
4665 return 0x01;
4666 }
4667
4668 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4669 {
4670 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4671 struct hci_conn *conn;
4672
4673 BT_DBG("%s", hdev->name);
4674
4675 hci_dev_lock(hdev);
4676
4677 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4678 if (!conn)
4679 goto unlock;
4680
4681 hci_conn_hold(conn);
4682
4683 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4684 goto unlock;
4685
4686 /* Allow pairing if we're bondable, if we are the initiator
4687 * of the pairing, or if the remote is not requesting bonding.
4688 */
4689 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4690 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4691 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4692 struct hci_cp_io_capability_reply cp;
4693
4694 bacpy(&cp.bdaddr, &ev->bdaddr);
4695 /* Change the IO capability from KeyboardDisplay to
4696 * DisplayYesNo, as KeyboardDisplay is not defined for BR/EDR. */
4697 cp.capability = (conn->io_capability == 0x04) ?
4698 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4699
4700 /* If we are initiators, there is no remote information yet */
4701 if (conn->remote_auth == 0xff) {
4702 /* Request MITM protection if our IO caps allow it
4703 * except for the no-bonding case.
4704 */
4705 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4706 conn->auth_type != HCI_AT_NO_BONDING)
4707 conn->auth_type |= 0x01;
4708 } else {
4709 conn->auth_type = hci_get_auth_req(conn);
4710 }
4711
4712 /* If we're not bondable, force one of the non-bondable
4713 * authentication requirement values.
4714 */
4715 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4716 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4717
4718 cp.authentication = conn->auth_type;
4719 cp.oob_data = bredr_oob_data_present(conn);
4720
4721 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4722 sizeof(cp), &cp);
4723 } else {
4724 struct hci_cp_io_capability_neg_reply cp;
4725
4726 bacpy(&cp.bdaddr, &ev->bdaddr);
4727 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4728
4729 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4730 sizeof(cp), &cp);
4731 }
4732
4733 unlock:
4734 hci_dev_unlock(hdev);
4735 }
4736
4737 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4738 {
4739 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4740 struct hci_conn *conn;
4741
4742 BT_DBG("%s", hdev->name);
4743
4744 hci_dev_lock(hdev);
4745
4746 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4747 if (!conn)
4748 goto unlock;
4749
4750 conn->remote_cap = ev->capability;
4751 conn->remote_auth = ev->authentication;
4752
4753 unlock:
4754 hci_dev_unlock(hdev);
4755 }
4756
4757 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4758 struct sk_buff *skb)
4759 {
4760 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4761 int loc_mitm, rem_mitm, confirm_hint = 0;
4762 struct hci_conn *conn;
4763
4764 BT_DBG("%s", hdev->name);
4765
4766 hci_dev_lock(hdev);
4767
4768 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4769 goto unlock;
4770
4771 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4772 if (!conn)
4773 goto unlock;
4774
4775 loc_mitm = (conn->auth_type & 0x01);
4776 rem_mitm = (conn->remote_auth & 0x01);
4777
4778 /* If we require MITM but the remote device can't provide that
4779 * (it has NoInputNoOutput) then reject the confirmation
4780 * request. We check the security level here since it doesn't
4781 * necessarily match conn->auth_type.
4782 */
4783 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4784 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4785 BT_DBG("Rejecting request: remote device can't provide MITM");
4786 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4787 sizeof(ev->bdaddr), &ev->bdaddr);
4788 goto unlock;
4789 }
4790
4791 /* If no side requires MITM protection; auto-accept */
4792 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4793 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4794
4795 /* If we're not the initiators, request authorization to
4796 * proceed from user space (mgmt_user_confirm with
4797 * confirm_hint set to 1). The exception is if neither
4798 * side had MITM or if the local IO capability is
4799 * NoInputNoOutput, in which case we do auto-accept
4800 */
4801 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4802 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4803 (loc_mitm || rem_mitm)) {
4804 BT_DBG("Confirming auto-accept as acceptor");
4805 confirm_hint = 1;
4806 goto confirm;
4807 }
4808
4809 /* If a link key already exists in the local host, leave the
4810 * decision to user space since the remote device could be
4811 * legitimate or malicious.
4812 */
4813 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4814 bt_dev_dbg(hdev, "Local host already has link key");
4815 confirm_hint = 1;
4816 goto confirm;
4817 }
4818
4819 BT_DBG("Auto-accept of user confirmation with %ums delay",
4820 hdev->auto_accept_delay);
4821
4822 if (hdev->auto_accept_delay > 0) {
4823 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4824 queue_delayed_work(conn->hdev->workqueue,
4825 &conn->auto_accept_work, delay);
4826 goto unlock;
4827 }
4828
4829 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4830 sizeof(ev->bdaddr), &ev->bdaddr);
4831 goto unlock;
4832 }
4833
4834 confirm:
4835 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4836 le32_to_cpu(ev->passkey), confirm_hint);
4837
4838 unlock:
4839 hci_dev_unlock(hdev);
4840 }
4841
4842 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4843 struct sk_buff *skb)
4844 {
4845 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4846
4847 BT_DBG("%s", hdev->name);
4848
4849 if (hci_dev_test_flag(hdev, HCI_MGMT))
4850 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4851 }
4852
4853 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4854 struct sk_buff *skb)
4855 {
4856 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4857 struct hci_conn *conn;
4858
4859 BT_DBG("%s", hdev->name);
4860
4861 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4862 if (!conn)
4863 return;
4864
4865 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4866 conn->passkey_entered = 0;
4867
4868 if (hci_dev_test_flag(hdev, HCI_MGMT))
4869 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4870 conn->dst_type, conn->passkey_notify,
4871 conn->passkey_entered);
4872 }
4873
4874 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4875 {
4876 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4877 struct hci_conn *conn;
4878
4879 BT_DBG("%s", hdev->name);
4880
4881 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4882 if (!conn)
4883 return;
4884
4885 switch (ev->type) {
4886 case HCI_KEYPRESS_STARTED:
4887 conn->passkey_entered = 0;
4888 return;
4889
4890 case HCI_KEYPRESS_ENTERED:
4891 conn->passkey_entered++;
4892 break;
4893
4894 case HCI_KEYPRESS_ERASED:
4895 conn->passkey_entered--;
4896 break;
4897
4898 case HCI_KEYPRESS_CLEARED:
4899 conn->passkey_entered = 0;
4900 break;
4901
4902 case HCI_KEYPRESS_COMPLETED:
4903 return;
4904 }
4905
4906 if (hci_dev_test_flag(hdev, HCI_MGMT))
4907 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4908 conn->dst_type, conn->passkey_notify,
4909 conn->passkey_entered);
4910 }
4911
4912 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4913 struct sk_buff *skb)
4914 {
4915 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4916 struct hci_conn *conn;
4917
4918 BT_DBG("%s", hdev->name);
4919
4920 hci_dev_lock(hdev);
4921
4922 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4923 if (!conn)
4924 goto unlock;
4925
4926 /* Reset the authentication requirement to unknown */
4927 conn->remote_auth = 0xff;
4928
4929 /* To avoid duplicate auth_failed events to user space we check
4930 * the HCI_CONN_AUTH_PEND flag which will be set if we
4931 * initiated the authentication. A traditional auth_complete
4932 * event is always produced as initiator and is also mapped to
4933 * the mgmt_auth_failed event */
4934 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4935 mgmt_auth_failed(conn, ev->status);
4936
4937 hci_conn_drop(conn);
4938
4939 unlock:
4940 hci_dev_unlock(hdev);
4941 }
4942
4943 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4944 struct sk_buff *skb)
4945 {
4946 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4947 struct inquiry_entry *ie;
4948 struct hci_conn *conn;
4949
4950 BT_DBG("%s", hdev->name);
4951
4952 hci_dev_lock(hdev);
4953
4954 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4955 if (conn)
4956 memcpy(conn->features[1], ev->features, 8);
4957
4958 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4959 if (ie)
4960 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4961
4962 hci_dev_unlock(hdev);
4963 }
4964
4965 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4966 struct sk_buff *skb)
4967 {
4968 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4969 struct oob_data *data;
4970
4971 BT_DBG("%s", hdev->name);
4972
4973 hci_dev_lock(hdev);
4974
4975 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4976 goto unlock;
4977
4978 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4979 if (!data) {
4980 struct hci_cp_remote_oob_data_neg_reply cp;
4981
4982 bacpy(&cp.bdaddr, &ev->bdaddr);
4983 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4984 sizeof(cp), &cp);
4985 goto unlock;
4986 }
4987
4988 if (bredr_sc_enabled(hdev)) {
4989 struct hci_cp_remote_oob_ext_data_reply cp;
4990
4991 bacpy(&cp.bdaddr, &ev->bdaddr);
4992 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4993 memset(cp.hash192, 0, sizeof(cp.hash192));
4994 memset(cp.rand192, 0, sizeof(cp.rand192));
4995 } else {
4996 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4997 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4998 }
4999 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5000 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5001
5002 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5003 sizeof(cp), &cp);
5004 } else {
5005 struct hci_cp_remote_oob_data_reply cp;
5006
5007 bacpy(&cp.bdaddr, &ev->bdaddr);
5008 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5009 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5010
5011 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5012 sizeof(cp), &cp);
5013 }
5014
5015 unlock:
5016 hci_dev_unlock(hdev);
5017 }
5018
5019 #if IS_ENABLED(CONFIG_BT_HS)
5020 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5021 {
5022 struct hci_ev_channel_selected *ev = (void *)skb->data;
5023 struct hci_conn *hcon;
5024
5025 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5026
5027 skb_pull(skb, sizeof(*ev));
5028
5029 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5030 if (!hcon)
5031 return;
5032
5033 amp_read_loc_assoc_final_data(hdev, hcon);
5034 }
5035
5036 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
5037 struct sk_buff *skb)
5038 {
5039 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
5040 struct hci_conn *hcon, *bredr_hcon;
5041
5042 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
5043 ev->status);
5044
5045 hci_dev_lock(hdev);
5046
5047 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5048 if (!hcon)
5049 goto unlock;
5050
5051 if (!hcon->amp_mgr)
5052 goto unlock;
5053
5054 if (ev->status) {
5055 hci_conn_del(hcon);
5056 goto unlock;
5057 }
5058
5059 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5060
5061 hcon->state = BT_CONNECTED;
5062 bacpy(&hcon->dst, &bredr_hcon->dst);
5063
5064 hci_conn_hold(hcon);
5065 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5066 hci_conn_drop(hcon);
5067
5068 hci_debugfs_create_conn(hcon);
5069 hci_conn_add_sysfs(hcon);
5070
5071 amp_physical_cfm(bredr_hcon, hcon);
5072
5073 unlock:
5074 hci_dev_unlock(hdev);
5075 }
5076
5077 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5078 {
5079 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5080 struct hci_conn *hcon;
5081 struct hci_chan *hchan;
5082 struct amp_mgr *mgr;
5083
5084 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5085 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5086 ev->status);
5087
5088 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5089 if (!hcon)
5090 return;
5091
5092 /* Create AMP hchan */
5093 hchan = hci_chan_create(hcon);
5094 if (!hchan)
5095 return;
5096
5097 hchan->handle = le16_to_cpu(ev->handle);
5098 hchan->amp = true;
5099
5100 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5101
5102 mgr = hcon->amp_mgr;
5103 if (mgr && mgr->bredr_chan) {
5104 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5105
5106 l2cap_chan_lock(bredr_chan);
5107
5108 bredr_chan->conn->mtu = hdev->block_mtu;
5109 l2cap_logical_cfm(bredr_chan, hchan, 0);
5110 hci_conn_hold(hcon);
5111
5112 l2cap_chan_unlock(bredr_chan);
5113 }
5114 }
5115
5116 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5117 struct sk_buff *skb)
5118 {
5119 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5120 struct hci_chan *hchan;
5121
5122 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5123 le16_to_cpu(ev->handle), ev->status);
5124
5125 if (ev->status)
5126 return;
5127
5128 hci_dev_lock(hdev);
5129
5130 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5131 if (!hchan || !hchan->amp)
5132 goto unlock;
5133
5134 amp_destroy_logical_link(hchan, ev->reason);
5135
5136 unlock:
5137 hci_dev_unlock(hdev);
5138 }
5139
5140 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5141 struct sk_buff *skb)
5142 {
5143 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5144 struct hci_conn *hcon;
5145
5146 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5147
5148 if (ev->status)
5149 return;
5150
5151 hci_dev_lock(hdev);
5152
5153 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5154 if (hcon) {
5155 hcon->state = BT_CLOSED;
5156 hci_conn_del(hcon);
5157 }
5158
5159 hci_dev_unlock(hdev);
5160 }
5161 #endif
5162
5163 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5164 u8 bdaddr_type, bdaddr_t *local_rpa)
5165 {
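	/* Record the initiator and responder addresses of this
	 * connection; among other things, SMP legacy pairing uses them
	 * when computing the confirm values.
	 */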
5166 if (conn->out) {
5167 conn->dst_type = bdaddr_type;
5168 conn->resp_addr_type = bdaddr_type;
5169 bacpy(&conn->resp_addr, bdaddr);
5170
5171 /* If the controller has set a Local RPA then it must be
5172 * used instead of hdev->rpa.
5173 */
5174 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5175 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5176 bacpy(&conn->init_addr, local_rpa);
5177 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5178 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5179 bacpy(&conn->init_addr, &conn->hdev->rpa);
5180 } else {
5181 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5182 &conn->init_addr_type);
5183 }
5184 } else {
5185 conn->resp_addr_type = conn->hdev->adv_addr_type;
5186 /* If the controller has set a Local RPA then it must be
5187 * used instead of hdev->rpa.
5188 */
5189 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5190 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5191 bacpy(&conn->resp_addr, local_rpa);
5192 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5193 /* In case of ext adv, resp_addr will be updated in
5194 * Adv Terminated event.
5195 */
5196 if (!ext_adv_capable(conn->hdev))
5197 bacpy(&conn->resp_addr,
5198 &conn->hdev->random_addr);
5199 } else {
5200 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5201 }
5202
5203 conn->init_addr_type = bdaddr_type;
5204 bacpy(&conn->init_addr, bdaddr);
5205
5206 /* For incoming connections, set the default minimum
5207 * and maximum connection interval. They will be used
5208 * to check if the parameters are in range and if not
5209 * trigger the connection update procedure.
5210 */
5211 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5212 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5213 }
5214 }
5215
5216 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5217 bdaddr_t *bdaddr, u8 bdaddr_type,
5218 bdaddr_t *local_rpa, u8 role, u16 handle,
5219 u16 interval, u16 latency,
5220 u16 supervision_timeout)
5221 {
5222 struct hci_conn_params *params;
5223 struct hci_conn *conn;
5224 struct smp_irk *irk;
5225 u8 addr_type;
5226
5227 hci_dev_lock(hdev);
5228
5229 /* All controllers implicitly stop advertising in the event of a
5230 * connection, so ensure that the state bit is cleared.
5231 */
5232 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5233
5234 conn = hci_lookup_le_connect(hdev);
5235 if (!conn) {
5236 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5237 if (!conn) {
5238 bt_dev_err(hdev, "no memory for new connection");
5239 goto unlock;
5240 }
5241
5242 conn->dst_type = bdaddr_type;
5243
5244 /* If we didn't have a hci_conn object previously
5245 * but we're in central role this must be something
5246 * initiated using an accept list. Since accept list based
5247 * connections are not "first class citizens" we don't
5248 * have full tracking of them. Therefore, we go ahead
5249 * with a "best effort" approach of determining the
5250 * initiator address based on the HCI_PRIVACY flag.
5251 */
5252 if (conn->out) {
5253 conn->resp_addr_type = bdaddr_type;
5254 bacpy(&conn->resp_addr, bdaddr);
5255 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5256 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5257 bacpy(&conn->init_addr, &hdev->rpa);
5258 } else {
5259 hci_copy_identity_address(hdev,
5260 &conn->init_addr,
5261 &conn->init_addr_type);
5262 }
5263 }
5264 } else {
5265 cancel_delayed_work(&conn->le_conn_timeout);
5266 }
5267
5268 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5269
5270 /* Lookup the identity address from the stored connection
5271 * address and address type.
5272 *
5273 * When establishing connections to an identity address, the
5274 * connection procedure will store the resolvable random
5275 * address first. Now if it can be converted back into the
5276 * identity address, start using the identity address from
5277 * now on.
5278 */
5279 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5280 if (irk) {
5281 bacpy(&conn->dst, &irk->bdaddr);
5282 conn->dst_type = irk->addr_type;
5283 }
5284
5285 /* When using controller based address resolution, then the new
5286 * address types 0x02 and 0x03 are used. These types need to be
5287 * converted back into either public address or random address type
5288 */
5289 if (use_ll_privacy(hdev) &&
5290 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5291 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
5292 switch (conn->dst_type) {
5293 case ADDR_LE_DEV_PUBLIC_RESOLVED:
5294 conn->dst_type = ADDR_LE_DEV_PUBLIC;
5295 break;
5296 case ADDR_LE_DEV_RANDOM_RESOLVED:
5297 conn->dst_type = ADDR_LE_DEV_RANDOM;
5298 break;
5299 }
5300 }
5301
5302 if (status) {
5303 hci_le_conn_failed(conn, status);
5304 goto unlock;
5305 }
5306
5307 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5308 addr_type = BDADDR_LE_PUBLIC;
5309 else
5310 addr_type = BDADDR_LE_RANDOM;
5311
5312 /* Drop the connection if the device is blocked */
5313 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5314 hci_conn_drop(conn);
5315 goto unlock;
5316 }
5317
5318 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5319 mgmt_device_connected(hdev, conn, NULL, 0);
5320
5321 conn->sec_level = BT_SECURITY_LOW;
5322 conn->handle = handle;
5323 conn->state = BT_CONFIG;
5324
5325 /* Store current advertising instance as connection advertising instance
5326 * when software rotation is in use so it can be re-enabled when
5327 * disconnected.
5328 */
5329 if (!ext_adv_capable(hdev))
5330 conn->adv_instance = hdev->cur_adv_instance;
5331
5332 conn->le_conn_interval = interval;
5333 conn->le_conn_latency = latency;
5334 conn->le_supv_timeout = supervision_timeout;
5335
5336 hci_debugfs_create_conn(conn);
5337 hci_conn_add_sysfs(conn);
5338
5339 /* The remote features procedure is defined for central
5340 * role only. So only in case of an initiated connection
5341 * request the remote features.
5342 *
5343 * If the local controller supports peripheral-initiated features
5344 * exchange, then requesting the remote features in peripheral
5345 * role is possible. Otherwise just transition into the
5346 * connected state without requesting the remote features.
5347 */
5348 if (conn->out ||
5349 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5350 struct hci_cp_le_read_remote_features cp;
5351
5352 cp.handle = __cpu_to_le16(conn->handle);
5353
5354 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5355 sizeof(cp), &cp);
5356
5357 hci_conn_hold(conn);
5358 } else {
5359 conn->state = BT_CONNECTED;
5360 hci_connect_cfm(conn, status);
5361 }
5362
5363 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5364 conn->dst_type);
5365 if (params) {
5366 list_del_init(&params->action);
5367 if (params->conn) {
5368 hci_conn_drop(params->conn);
5369 hci_conn_put(params->conn);
5370 params->conn = NULL;
5371 }
5372 }
5373
5374 unlock:
5375 hci_update_background_scan(hdev);
5376 hci_dev_unlock(hdev);
5377 }
5378
5379 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5380 {
5381 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5382
5383 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5384
5385 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5386 NULL, ev->role, le16_to_cpu(ev->handle),
5387 le16_to_cpu(ev->interval),
5388 le16_to_cpu(ev->latency),
5389 le16_to_cpu(ev->supervision_timeout));
5390 }
5391
5392 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5393 struct sk_buff *skb)
5394 {
5395 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5396
5397 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5398
5399 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5400 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5401 le16_to_cpu(ev->interval),
5402 le16_to_cpu(ev->latency),
5403 le16_to_cpu(ev->supervision_timeout));
5404
5405 if (use_ll_privacy(hdev) &&
5406 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5407 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5408 hci_req_disable_address_resolution(hdev);
5409 }
5410
5411 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5412 {
5413 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5414 struct hci_conn *conn;
5415 struct adv_info *adv;
5416
5417 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5418
5419 adv = hci_find_adv_instance(hdev, ev->handle);
5420
5421 if (ev->status) {
5422 if (!adv)
5423 return;
5424
5425 /* Remove advertising as it has been terminated */
5426 hci_remove_adv_instance(hdev, ev->handle);
5427 mgmt_advertising_removed(NULL, hdev, ev->handle);
5428
5429 return;
5430 }
5431
5432 if (adv)
5433 adv->enabled = false;
5434
5435 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5436 if (conn) {
5437 /* Store handle in the connection so the correct advertising
5438 * instance can be re-enabled when disconnected.
5439 */
5440 conn->adv_instance = ev->handle;
5441
5442 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5443 bacmp(&conn->resp_addr, BDADDR_ANY))
5444 return;
5445
5446 if (!ev->handle) {
5447 bacpy(&conn->resp_addr, &hdev->random_addr);
5448 return;
5449 }
5450
5451 if (adv)
5452 bacpy(&conn->resp_addr, &adv->random_addr);
5453 }
5454 }
5455
5456 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5457 struct sk_buff *skb)
5458 {
5459 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5460 struct hci_conn *conn;
5461
5462 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5463
5464 if (ev->status)
5465 return;
5466
5467 hci_dev_lock(hdev);
5468
5469 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5470 if (conn) {
5471 conn->le_conn_interval = le16_to_cpu(ev->interval);
5472 conn->le_conn_latency = le16_to_cpu(ev->latency);
5473 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5474 }
5475
5476 hci_dev_unlock(hdev);
5477 }
5478
5479 /* This function requires the caller holds hdev->lock */
5480 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5481 bdaddr_t *addr,
5482 u8 addr_type, u8 adv_type,
5483 bdaddr_t *direct_rpa)
5484 {
5485 struct hci_conn *conn;
5486 struct hci_conn_params *params;
5487
5488 /* If the event is not connectable don't proceed further */
5489 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5490 return NULL;
5491
5492 /* Ignore if the device is blocked */
5493 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5494 return NULL;
5495
5496 /* Most controllers will fail if we try to create new connections
5497 * while we have an existing one in peripheral role.
5498 */
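	/* le_states[3] & 0x10 tests bit 28 of the supported LE states
	 * mask; only if the controller advertises that state and the
	 * quirk marks the reported states as valid is a new central
	 * connection attempted while a peripheral link exists.
	 */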
5499 if (hdev->conn_hash.le_num_peripheral > 0 &&
5500 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5501 !(hdev->le_states[3] & 0x10)))
5502 return NULL;
5503
5504 /* If we're not connectable only connect devices that we have in
5505 * our pend_le_conns list.
5506 */
5507 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5508 addr_type);
5509 if (!params)
5510 return NULL;
5511
5512 if (!params->explicit_connect) {
5513 switch (params->auto_connect) {
5514 case HCI_AUTO_CONN_DIRECT:
5515 /* Only devices advertising with ADV_DIRECT_IND
5516 * trigger a connection attempt. This allows
5517 * incoming connections from peripheral devices.
5518 */
5519 if (adv_type != LE_ADV_DIRECT_IND)
5520 return NULL;
5521 break;
5522 case HCI_AUTO_CONN_ALWAYS:
5523 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5524 * trigger a connection attempt. This means that
5525 * incoming connections from peripheral devices are
5526 * accepted and also outgoing connections to peripheral
5527 * devices are established when found.
5528 */
5529 break;
5530 default:
5531 return NULL;
5532 }
5533 }
5534
5535 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5536 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5537 direct_rpa);
5538 if (!IS_ERR(conn)) {
5539 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5540 * by the higher layer that tried to connect; if not, then
5541 * store the pointer since we don't really have any
5542 * other owner of the object besides the params that
5543 * triggered it. This way we can abort the connection if
5544 * the parameters get removed and keep the reference
5545 * count consistent once the connection is established.
5546 */
5547
5548 if (!params->explicit_connect)
5549 params->conn = hci_conn_get(conn);
5550
5551 return conn;
5552 }
5553
5554 switch (PTR_ERR(conn)) {
5555 case -EBUSY:
5556 /* If hci_connect() returns -EBUSY it means there is already
5557 * an LE connection attempt going on. Since controllers don't
5558 * support more than one connection attempt at a time, we
5559 * don't consider this an error case.
5560 */
5561 break;
5562 default:
5563 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5564 return NULL;
5565 }
5566
5567 return NULL;
5568 }
5569
5570 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5571 u8 bdaddr_type, bdaddr_t *direct_addr,
5572 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5573 bool ext_adv)
5574 {
5575 struct discovery_state *d = &hdev->discovery;
5576 struct smp_irk *irk;
5577 struct hci_conn *conn;
5578 bool match;
5579 u32 flags;
5580 u8 *ptr;
5581
5582 switch (type) {
5583 case LE_ADV_IND:
5584 case LE_ADV_DIRECT_IND:
5585 case LE_ADV_SCAN_IND:
5586 case LE_ADV_NONCONN_IND:
5587 case LE_ADV_SCAN_RSP:
5588 break;
5589 default:
5590 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5591 "type: 0x%02x", type);
5592 return;
5593 }
5594
5595 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5596 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5597 return;
5598 }
5599
5600 /* Find the end of the data in case the report contains padded zero
5601 * bytes at the end causing an invalid length value.
5602 *
5603 * When data is NULL, len is 0 so there is no need for extra ptr
5604 * check as 'ptr < data + 0' is already false in such case.
5605 */
5606 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5607 if (ptr + 1 + *ptr > data + len)
5608 break;
5609 }
5610
5611 /* Adjust for actual length. This handles the case where the remote
5612 * device is advertising with incorrect data length.
5613 */
5614 len = ptr - data;
5615
5616 /* If the direct address is present, then this report is from
5617 * a LE Direct Advertising Report event. In that case it is
5618 * important to see if the address is matching the local
5619 * controller address.
5620 */
5621 if (direct_addr) {
5622 /* Only resolvable random addresses are valid for these
5623 * kind of reports and others can be ignored.
5624 */
5625 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5626 return;
5627
5628 /* If the controller is not using resolvable random
5629 * addresses, then this report can be ignored.
5630 */
5631 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5632 return;
5633
5634 /* If the local IRK of the controller does not match
5635 * with the resolvable random address provided, then
5636 * this report can be ignored.
5637 */
5638 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5639 return;
5640 }
5641
5642 /* Check if we need to convert to identity address */
5643 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5644 if (irk) {
5645 bdaddr = &irk->bdaddr;
5646 bdaddr_type = irk->addr_type;
5647 }
5648
5649 /* Check if we have been requested to connect to this device.
5650 *
5651 * direct_addr is set only for directed advertising reports (it is NULL
5652 * for advertising reports) and is already verified to be RPA above.
5653 */
5654 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5655 direct_addr);
5656 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5657 /* Store report for later inclusion by
5658 * mgmt_device_connected
5659 */
5660 memcpy(conn->le_adv_data, data, len);
5661 conn->le_adv_data_len = len;
5662 }
5663
5664 /* Passive scanning shouldn't trigger any device found events,
5665 * except for devices marked as CONN_REPORT for which we do send
5666 * device found events, or when advertisement monitoring is requested.
5667 */
5668 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5669 if (type == LE_ADV_DIRECT_IND)
5670 return;
5671
5672 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5673 bdaddr, bdaddr_type) &&
5674 idr_is_empty(&hdev->adv_monitors_idr))
5675 return;
5676
5677 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5678 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5679 else
5680 flags = 0;
5681 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5682 rssi, flags, data, len, NULL, 0);
5683 return;
5684 }
5685
5686 /* When receiving non-connectable or scannable undirected
5687 * advertising reports, the remote device is not
5688 * connectable, so clearly indicate this in the
5689 * device found event.
5690 *
5691 * When receiving a scan response, then there is no way to
5692 * know if the remote device is connectable or not. However
5693 * since scan responses are merged with a previously seen
5694 * advertising report, the flags field from that report
5695 * will be used.
5696 *
5697 * In the really unlikely case that a controller gets confused
5698 * and just sends a scan response event, then it is marked as
5699 * not connectable as well.
5700 */
5701 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5702 type == LE_ADV_SCAN_RSP)
5703 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5704 else
5705 flags = 0;
5706
	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}

static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

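	/* Each LE Advertising Report event carries one or more reports
	 * laid out back to back:
	 *
	 *   evt_type (1) | bdaddr_type (1) | bdaddr (6) | length (1) |
	 *   data (length) | rssi (1)
	 *
	 * which is why the RSSI is read from just past the advertising
	 * data below, and why the iterator advances by
	 * sizeof(*ev) + length + 1.
	 */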
	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;
		s8 rssi;

		if (ev->length <= HCI_MAX_AD_LENGTH) {
			rssi = ev->data[ev->length];
			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, rssi,
					   ev->data, ev->length, false);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}

		ptr += sizeof(*ev) + ev->length + 1;
	}

	hci_dev_unlock(hdev);
}

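/* The extended advertising report event type is a bit field; going by the
 * LE_EXT_ADV_* constants in hci.h, bit 0 signals a connectable PDU, bit 1
 * scannable, bit 2 directed, bit 3 a scan response and bit 4 a legacy PDU.
 * Legacy PDUs use fixed combinations of these bits (e.g. LE_LEGACY_ADV_IND
 * is connectable + scannable + legacy), so they are matched as whole values
 * below while extended PDUs are decoded bit by bit.
 */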
static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
{
	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
		switch (evt_type) {
		case LE_LEGACY_ADV_IND:
			return LE_ADV_IND;
		case LE_LEGACY_ADV_DIRECT_IND:
			return LE_ADV_DIRECT_IND;
		case LE_LEGACY_ADV_SCAN_IND:
			return LE_ADV_SCAN_IND;
		case LE_LEGACY_NONCONN_IND:
			return LE_ADV_NONCONN_IND;
		case LE_LEGACY_SCAN_RSP_ADV:
		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
			return LE_ADV_SCAN_RSP;
		}

		goto invalid;
	}

	if (evt_type & LE_EXT_ADV_CONN_IND) {
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

		return LE_ADV_IND;
	}

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
	    evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

invalid:
	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
			       evt_type);

	return LE_ADV_INVALID;
}

static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

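	/* Unlike the legacy report, the extended report header (see
	 * struct hci_ev_le_ext_adv_report) already carries the RSSI
	 * along with the PHYs, SID, TX power and periodic advertising
	 * interval, and the advertising data directly follows it; hence
	 * the iterator below advances by sizeof(*ev) + length with no
	 * trailing RSSI byte.
	 */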
	while (num_reports--) {
		struct hci_ev_le_ext_adv_report *ev = ptr;
		u8 legacy_evt_type;
		u16 evt_type;

		evt_type = __le16_to_cpu(ev->evt_type);
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, ev->rssi,
					   ev->data, ev->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));
		}

		ptr += sizeof(*ev) + ev->length;
	}

	hci_dev_unlock(hdev);
}

static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}


static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

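	/* Keys shortened during pairing (enc_size < 16) are stored at
	 * their negotiated length; the reply buffer is always 16 bytes,
	 * so zero-pad the remainder before handing it to the controller.
	 */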
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}


static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

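	/* All parameters arrive in controller units: the interval bounds
	 * in 1.25 ms steps, the supervision timeout in 10 ms steps, and
	 * the latency as a plain count of connection events the
	 * peripheral may skip. hci_check_conn_params() validates them
	 * against the ranges the specification allows.
	 */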
	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
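	/* The CE length fields are informational hints to the controller;
	 * zero is used here to express no particular expectation about
	 * how much of each connection event should be reserved.
	 */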
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}

static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];

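	/* Direct advertising reports carry no advertising data, so each
	 * record is fixed size and a single length check up front covers
	 * the whole event.
	 */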
	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
		return;

	hci_dev_lock(hdev);

	for (; num_reports; num_reports--, ev++)
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0,
				   false);

	hci_dev_unlock(hdev);
}


static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

unlock:
	hci_dev_unlock(hdev);
}


static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

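	/* The meta-event header has been pulled off above, so every
	 * subevent handler below sees skb->data pointing at its own
	 * subevent-specific parameters.
	 */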
	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
		hci_le_phy_update_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
		break;

	default:
		break;
	}
}

static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}

static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_report *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;


	/* Default to remote wake. Values for wake_reason are documented in the
	 * BlueZ mgmt API docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}


void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

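	/* If this event is the one the currently pending command (or
	 * request) is waiting for, look up its completion callbacks now;
	 * they are invoked at the bottom of this function once the event
	 * itself has been processed.
	 */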
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	case HCI_EV_VENDOR:
		msft_vendor_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
