1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41
42 #define MGMT_VERSION 1
43 #define MGMT_REVISION 18
44
/* Opcodes accepted from trusted management sockets. Reported verbatim
 * by MGMT_OP_READ_COMMANDS for privileged callers.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
};
126
/* Events delivered to trusted management sockets. Reported verbatim
 * by MGMT_OP_READ_COMMANDS for privileged callers.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};
169
/* Read-only subset of opcodes available to untrusted (non-privileged)
 * management sockets.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
182
/* Subset of events delivered to untrusted management sockets (no
 * key material or per-device security events).
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
};
199
200 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
201
202 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
203 "\x00\x00\x00\x00\x00\x00\x00\x00"
204
205 /* HCI to MGMT error code conversion table */
206 static const u8 mgmt_status_table[] = {
207 MGMT_STATUS_SUCCESS,
208 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
209 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
210 MGMT_STATUS_FAILED, /* Hardware Failure */
211 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
212 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
213 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
214 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
215 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
216 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
217 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
218 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
219 MGMT_STATUS_BUSY, /* Command Disallowed */
220 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
221 MGMT_STATUS_REJECTED, /* Rejected Security */
222 MGMT_STATUS_REJECTED, /* Rejected Personal */
223 MGMT_STATUS_TIMEOUT, /* Host Timeout */
224 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
225 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
226 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
227 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
228 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
229 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
230 MGMT_STATUS_BUSY, /* Repeated Attempts */
231 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
232 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
233 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
234 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
235 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
236 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
237 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
238 MGMT_STATUS_FAILED, /* Unspecified Error */
239 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
240 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
241 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
242 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
243 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
244 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
245 MGMT_STATUS_FAILED, /* Unit Link Key Used */
246 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
247 MGMT_STATUS_TIMEOUT, /* Instant Passed */
248 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
249 MGMT_STATUS_FAILED, /* Transaction Collision */
250 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
251 MGMT_STATUS_REJECTED, /* QoS Rejected */
252 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
253 MGMT_STATUS_REJECTED, /* Insufficient Security */
254 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
255 MGMT_STATUS_BUSY, /* Role Switch Pending */
256 MGMT_STATUS_FAILED, /* Slot Violation */
257 MGMT_STATUS_FAILED, /* Role Switch Failed */
258 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
259 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
260 MGMT_STATUS_BUSY, /* Host Busy Pairing */
261 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
262 MGMT_STATUS_BUSY, /* Controller Busy */
263 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
264 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
265 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
266 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
267 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
268 };
269
/* Translate an HCI status code into its mgmt equivalent. Codes outside
 * the conversion table are reported as a generic failure.
 */
static u8 mgmt_status(u8 hci_status)
{
	return (hci_status < ARRAY_SIZE(mgmt_status_table)) ?
		mgmt_status_table[hci_status] : MGMT_STATUS_FAILED;
}
277
/* Broadcast an index-related event on the control channel to every
 * socket matching @flag; no socket is excluded from delivery.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	struct sock *skip = NULL;

	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip);
}
284
/* Send @event on the control channel, limited to sockets matching
 * @flag and optionally excluding @skip_sk.
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL,
			       data, len, flag, skip_sk);
}
291
/* Send @event on the control channel to all trusted sockets,
 * optionally excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL,
			       data, len, HCI_SOCK_TRUSTED, skip_sk);
}
298
/* Map an mgmt address type onto the HCI LE address type. Anything
 * other than public is treated as a random address.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return (mgmt_addr_type == BDADDR_LE_PUBLIC) ? ADDR_LE_DEV_PUBLIC :
						      ADDR_LE_DEV_RANDOM;
}
306
mgmt_fill_version_info(void * ver)307 void mgmt_fill_version_info(void *ver)
308 {
309 struct mgmt_rp_read_version *rp = ver;
310
311 rp->version = MGMT_VERSION;
312 rp->revision = cpu_to_le16(MGMT_REVISION);
313 }
314
/* Handle MGMT_OP_READ_VERSION: reply with the mgmt interface version.
 * Works without a controller index (hdev is only used for logging).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
327
/* Handle MGMT_OP_READ_COMMANDS: reply with the opcode and event lists
 * supported for this socket's trust level.
 *
 * The trusted flag is sampled exactly once and both the table pointers
 * and the counts are derived from that single sample, so the buffer
 * sizing and the fill loops can never disagree (the original code
 * tested HCI_SOCK_TRUSTED twice).
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *commands, *events;
	u16 num_commands, num_events;
	size_t rp_size;
	__le16 *opcode;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Untrusted sockets only get the read-only subset. */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		commands = mgmt_commands;
		events = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		commands = mgmt_untrusted_commands;
		events = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* The opcode list follows the header unaligned, hence
	 * put_unaligned_le16().
	 */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(events[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
379
/* Handle MGMT_OP_READ_INDEX_LIST: reply with the ids of all configured
 * HCI_PRIMARY controllers. Works without a controller index (hdev is
 * only used for logging); the global device list is walked under
 * hci_dev_list_lock.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the entry count, used only to size
	 * the reply buffer.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because the read lock is held across the allocation */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes with extra filters applied,
	 * re-deriving count since fewer entries may qualify.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still in setup/config or bound to a
		 * user channel.
		 */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply to the entries actually emitted */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
439
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list() but
 * reporting only HCI_PRIMARY controllers that are still unconfigured
 * (HCI_UNCONFIGURED set). Same two-pass size/fill pattern under
 * hci_dev_list_lock.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound used to size the reply buffer */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because the read lock is held across the allocation */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, re-deriving the count */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply to the entries actually emitted */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
499
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: reply with all controllers
 * (configured, unconfigured and AMP) including their type and bus.
 *
 * Side effect: switches the calling socket over to extended index
 * events, disabling the legacy index event delivery for it.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound used to size the reply buffer */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC because the read lock is held across the allocation */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries, re-deriving the count */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* Entry type: 0x00 configured, 0x01 unconfigured,
		 * 0x02 AMP controller.
		 */
		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
573
is_configured(struct hci_dev * hdev)574 static bool is_configured(struct hci_dev *hdev)
575 {
576 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
577 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
578 return false;
579
580 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
581 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
582 !bacmp(&hdev->public_addr, BDADDR_ANY))
583 return false;
584
585 return true;
586 }
587
get_missing_options(struct hci_dev * hdev)588 static __le32 get_missing_options(struct hci_dev *hdev)
589 {
590 u32 options = 0;
591
592 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
593 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
594 options |= MGMT_OPTION_EXTERNAL_CONFIG;
595
596 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
597 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
598 !bacmp(&hdev->public_addr, BDADDR_ANY))
599 options |= MGMT_OPTION_PUBLIC_ADDRESS;
600
601 return cpu_to_le32(options);
602 }
603
new_options(struct hci_dev * hdev,struct sock * skip)604 static int new_options(struct hci_dev *hdev, struct sock *skip)
605 {
606 __le32 options = get_missing_options(hdev);
607
608 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
609 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
610 }
611
/* Complete @opcode with a reply carrying the currently missing
 * configuration options for @hdev.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 missing = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0,
				 &missing, sizeof(missing));
}
619
/* Handle MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer id plus
 * the supported and currently missing configuration options.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 supported = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* External configuration is supported when the quirk demands it */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		supported |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A settable public address requires a driver set_bdaddr hook */
	if (hdev->set_bdaddr)
		supported |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(supported);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
647
/* Build the bitmask of PHYs the controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature page.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 3M capability implies/extends EDR 2M, hence the
		 * nested checks; slot counts are gated separately.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE controller */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
699
/* Build the bitmask of PHYs currently selected for use.
 *
 * For BR/EDR the selection is derived from hdev->pkt_type. Note the
 * EDR packet-type bits (HCI_2DHx/HCI_3DHx) have inverted polarity:
 * a set bit means "do NOT use", hence the negated tests below.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is always in use on BR/EDR */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	/* For LE, TX and RX selections come from the default PHY masks */
	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
762
get_configurable_phys(struct hci_dev * hdev)763 static u32 get_configurable_phys(struct hci_dev *hdev)
764 {
765 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
766 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
767 }
768
/* Build the bitmask of mgmt settings this controller can support,
 * derived from its BR/EDR and LE capabilities and quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of transport */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs interlaced scan (>= 1.2) */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally needs SSP and the
			 * kernel built with BT_HS.
			 */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

		/* When the experimental feature for LL Privacy support is
		 * enabled, then advertising is no longer supported.
		 */
		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration needs either the external-config quirk or a
	 * driver hook to set the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
820
/* Build the bitmask of mgmt settings currently in effect, derived from
 * the controller's power state and hdev flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
891
/* Look up a pending mgmt command on the control channel for @hdev. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
896
/* Look up a pending mgmt command on the control channel for @hdev,
 * additionally matching on its user data pointer.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
903
mgmt_get_adv_discov_flags(struct hci_dev * hdev)904 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
905 {
906 struct mgmt_pending_cmd *cmd;
907
908 /* If there's a pending mgmt command the flags will not yet have
909 * their final values, so check for this first.
910 */
911 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
912 if (cmd) {
913 struct mgmt_mode *cp = cmd->param;
914 if (cp->val == 0x01)
915 return LE_AD_GENERAL;
916 else if (cp->val == 0x02)
917 return LE_AD_LIMITED;
918 } else {
919 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
920 return LE_AD_LIMITED;
921 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
922 return LE_AD_GENERAL;
923 }
924
925 return 0;
926 }
927
mgmt_get_connectable(struct hci_dev * hdev)928 bool mgmt_get_connectable(struct hci_dev *hdev)
929 {
930 struct mgmt_pending_cmd *cmd;
931
932 /* If there's a pending mgmt command the flag will not yet have
933 * it's final value, so check for this first.
934 */
935 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
936 if (cmd) {
937 struct mgmt_mode *cp = cmd->param;
938
939 return cp->val;
940 }
941
942 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
943 }
944
/* Delayed work run when the service cache period expires: pushes the
 * (possibly changed) EIR data and class of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* test-and-clear: bail out unless the cache was actually active,
	 * and make sure the update runs only once per caching period.
	 */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	/* Submit the queued commands outside the device lock */
	hci_req_run(&req, NULL);
}
965
/* Delayed work run when the resolvable private address lifetime ends:
 * marks the RPA expired and, if advertising is active, restarts it so
 * a fresh RPA gets generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing further to do unless advertising is currently on;
	 * other users pick up HCI_RPA_EXPIRED on their own.
	 */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
990
/* One-time mgmt setup for a controller, performed the first time a
 * mgmt command addresses it. The HCI_MGMT test-and-set guard makes
 * repeated calls no-ops.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1006
/* Handle MGMT_OP_READ_INFO: reply with the controller's address,
 * version, manufacturer, class, names and setting bitmasks. The reply
 * is assembled under the device lock for a consistent snapshot.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));

	hci_dev_lock(hdev);

	/* Identity */
	bacpy(&rp.bdaddr, &hdev->bdaddr);
	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* Settings */
	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	/* Class of device and names */
	memcpy(rp.dev_class, hdev->dev_class, 3);
	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1036
/* Serialize the controller's information as EIR structures into @eir:
 * class of device (only when BR/EDR is enabled), appearance (only when
 * LE is enabled), then the complete and short local names.  Returns
 * the number of bytes written.  The caller must supply a buffer large
 * enough for all four fields; the append order defines the wire
 * layout, so do not reorder these calls.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1060
/* Handle MGMT_OP_READ_EXT_INFO: like Read Controller Information, but
 * with class of device, appearance and names packed as EIR data.
 * Calling this command at least once switches the socket over to the
 * extended info events and disables the legacy class/name events.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Zero the whole buffer; use buf (not &buf) for consistency
	 * with ext_info_changed().
	 */
	memset(buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1100
ext_info_changed(struct hci_dev * hdev,struct sock * skip)1101 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1102 {
1103 char buf[512];
1104 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1105 u16 eir_len;
1106
1107 memset(buf, 0, sizeof(buf));
1108
1109 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1110 ev->eir_len = cpu_to_le16(eir_len);
1111
1112 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1113 sizeof(*ev) + eir_len,
1114 HCI_MGMT_EXT_INFO_EVENTS, skip);
1115 }
1116
/* Reply to @opcode with the controller's current settings bitmask. */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings;

	settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1124
/* Completion callback for the power-off cleanup request: once no
 * connections remain, cancel the safety-net timer and run the actual
 * power_off work right away.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	if (hci_conn_count(hdev) != 0)
		return;

	cancel_delayed_work(&hdev->power_off);
	queue_work(hdev->req_workqueue, &hdev->power_off.work);
}
1134
/* Emit an Advertising Added event for @instance to all mgmt sockets,
 * skipping the socket that triggered the change.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1143
/* Emit an Advertising Removed event for @instance to all mgmt sockets,
 * skipping the socket that triggered the change.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1153
cancel_adv_timeout(struct hci_dev * hdev)1154 static void cancel_adv_timeout(struct hci_dev *hdev)
1155 {
1156 if (hdev->adv_instance_timeout) {
1157 hdev->adv_instance_timeout = 0;
1158 cancel_delayed_work(&hdev->adv_instance_expire);
1159 }
1160 }
1161
/* Build and run a single HCI request that quiesces the controller in
 * preparation for power off: disable page/inquiry scan, drop all
 * advertising instances, disable advertising, stop discovery and
 * abort every connection.  Returns the hci_req_run() result;
 * -ENODATA means nothing needed to be sent.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1195
/* Handle MGMT_OP_SET_POWERED: power the controller up or down.
 *
 * Powering up is delegated to the power_on work item.  Powering down
 * first runs clean_up_hci_state() to tear down scanning, advertising
 * and connections, then schedules power_off - immediately when there
 * was nothing to clean up (-ENODATA), otherwise after
 * HCI_POWER_OFF_TIMEOUT as a safety net.  The final reply is sent via
 * the pending command once the state change completes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: reply right away */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1250
new_settings(struct hci_dev * hdev,struct sock * skip)1251 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1252 {
1253 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1254
1255 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1256 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1257 }
1258
/* Broadcast a settings change triggered outside of a mgmt command
 * (no originating socket to skip).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1263
/* Context shared by the mgmt_pending_foreach() callbacks below. */
struct cmd_lookup {
	struct sock *sk;	/* first replying socket (a reference is held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1269
/* mgmt_pending_foreach() callback: answer @cmd with the current
 * settings and free it.  The first socket seen is remembered (with a
 * reference taken) in the cmd_lookup context so the caller can skip
 * it when broadcasting New Settings afterwards.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink before freeing; mgmt_pending_free() does not do it */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1285
cmd_status_rsp(struct mgmt_pending_cmd * cmd,void * data)1286 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1287 {
1288 u8 *status = data;
1289
1290 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1291 mgmt_pending_remove(cmd);
1292 }
1293
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1294 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1295 {
1296 if (cmd->cmd_complete) {
1297 u8 *status = data;
1298
1299 cmd->cmd_complete(cmd, *status);
1300 mgmt_pending_remove(cmd);
1301
1302 return;
1303 }
1304
1305 cmd_status_rsp(cmd, data);
1306 }
1307
/* Default cmd_complete handler: echo the original command parameters
 * back in the Command Complete reply.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1313
/* cmd_complete handler for commands whose parameters begin with a
 * struct mgmt_addr_info: only the address part is echoed back.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1319
mgmt_bredr_support(struct hci_dev * hdev)1320 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1321 {
1322 if (!lmp_bredr_capable(hdev))
1323 return MGMT_STATUS_NOT_SUPPORTED;
1324 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1325 return MGMT_STATUS_REJECTED;
1326 else
1327 return MGMT_STATUS_SUCCESS;
1328 }
1329
mgmt_le_support(struct hci_dev * hdev)1330 static u8 mgmt_le_support(struct hci_dev *hdev)
1331 {
1332 if (!lmp_le_capable(hdev))
1333 return MGMT_STATUS_NOT_SUPPORTED;
1334 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1335 return MGMT_STATUS_REJECTED;
1336 else
1337 return MGMT_STATUS_SUCCESS;
1338 }
1339
/* Called by the HCI request machinery once a discoverable mode change
 * has completed.  Resolves the pending MGMT_OP_SET_DISCOVERABLE
 * command, arms the discoverable timeout if one was requested and
 * broadcasts the new settings.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the timer that turns discoverable mode back off */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1374
/* Handle MGMT_OP_SET_DISCOVERABLE: val 0x00 disables, 0x01 enables
 * general and 0x02 limited discoverable mode; cp->timeout (seconds)
 * must be zero when disabling and non-zero for limited mode.  When
 * powered off only the setting flags are updated.  Otherwise the
 * actual mode change is performed by the discoverable_update work
 * item, and the reply is sent from mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable mode requires connectable mode */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1504
/* Called by the HCI request machinery once a connectable mode change
 * has completed.  Resolves the pending MGMT_OP_SET_CONNECTABLE command
 * and broadcasts the new settings on success.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1532
/* Powered-off path of set_connectable(): only adjust the setting
 * flags (disabling connectable also clears discoverable), reply with
 * the current settings and broadcast New Settings when something
 * actually changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = !!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE);
	int err;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (!changed)
		return 0;

	hci_req_update_scan(hdev);
	hci_update_background_scan(hdev);

	return new_settings(hdev, sk);
}
1561
/* Handle MGMT_OP_SET_CONNECTABLE: toggle whether the controller
 * accepts incoming connections.  When powered off only the setting
 * flags change; otherwise the connectable_update work performs the
 * HCI side and the reply is sent from mgmt_set_connectable_complete().
 * Disabling connectable also clears (limited) discoverable mode.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Non-connectable implies non-discoverable */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1618
/* Handle MGMT_OP_SET_BONDABLE: toggle the bondable flag.  This is a
 * host-side setting, but in limited privacy mode a change may affect
 * the local advertising address, in which case a discoverable update
 * is scheduled to re-program it.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1661
/* Handle MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR link level security
 * via HCI Write Authentication Enable.  When powered off only the
 * setting flag is updated; otherwise the reply is deferred until the
 * HCI command completes.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1730
/* Handle MGMT_OP_SET_SSP: toggle Secure Simple Pairing via HCI Write
 * Simple Pairing Mode.  Disabling SSP also clears High Speed support
 * (HS depends on SSP) and SSP debug mode.  When powered off only the
 * flags are updated; otherwise the reply is deferred until the HCI
 * command completes.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also disables HS */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off SSP debug mode */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1811
/* Handle MGMT_OP_SET_HS: toggle High Speed (AMP) support.  This is a
 * host-side flag only (no HCI traffic); it requires CONFIG_BT_HS and
 * SSP to be enabled, and cannot be disabled while the controller is
 * powered.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1872
/* HCI request callback for set_le(): resolve all pending SET_LE
 * commands, broadcast the new settings and, when LE has just been
 * enabled, refresh the default advertising and scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1919
/* Handle MGMT_OP_SET_LE: toggle LE support via HCI Write LE Host
 * Supported.  LE-only controllers (BR/EDR disabled) cannot have LE
 * switched off.  Disabling LE also removes advertising instances and
 * stops advertising.  The reply is sent from le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Turning LE off removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2026
2027 /* This is a helper function to test for pending mgmt commands that can
2028 * cause CoD or EIR HCI commands. We can only allow one such pending
2029 * mgmt command at a time since otherwise we cannot easily track what
2030 * the current values are, will be, and based on that calculate if a new
2031 * HCI command needs to be sent and if yes with what value.
2032 */
pending_eir_or_class(struct hci_dev * hdev)2033 static bool pending_eir_or_class(struct hci_dev *hdev)
2034 {
2035 struct mgmt_pending_cmd *cmd;
2036
2037 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2038 switch (cmd->opcode) {
2039 case MGMT_OP_ADD_UUID:
2040 case MGMT_OP_REMOVE_UUID:
2041 case MGMT_OP_SET_DEV_CLASS:
2042 case MGMT_OP_SET_POWERED:
2043 return true;
2044 }
2045 }
2046
2047 return false;
2048 }
2049
/* Bluetooth SIG Base UUID (00000000-0000-1000-8000-00805f9b34fb) in
 * little-endian byte order; the variable 16/32-bit part occupies the
 * last four bytes (indexes 12-15).
 */
static const u8 bluetooth_base_uuid[] = {
	0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
	0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2054
get_uuid_size(const u8 * uuid)2055 static u8 get_uuid_size(const u8 *uuid)
2056 {
2057 u32 val;
2058
2059 if (memcmp(uuid, bluetooth_base_uuid, 12))
2060 return 128;
2061
2062 val = get_unaligned_le32(&uuid[12]);
2063 if (val > 0xffff)
2064 return 32;
2065
2066 return 16;
2067 }
2068
/* Resolve the pending UUID/class related command @mgmt_op once its
 * HCI request has finished, replying with the resulting class of
 * device.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2087
/* HCI request callback for MGMT_OP_ADD_UUID. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2094
/* Handle MGMT_OP_ADD_UUID: register a service UUID and refresh the
 * class of device and EIR data.  When no HCI traffic is needed
 * (hci_req_run() returns -ENODATA) the command completes immediately;
 * otherwise completion is deferred to add_uuid_complete().  The
 * allocated bt_uuid is owned by hdev->uuids from the moment it is
 * added to the list.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class modifying command may run at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Nothing to send: complete with the current class */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2152
enable_service_cache(struct hci_dev * hdev)2153 static bool enable_service_cache(struct hci_dev *hdev)
2154 {
2155 if (!hdev_is_powered(hdev))
2156 return false;
2157
2158 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2159 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2160 CACHE_TIMEOUT);
2161 return true;
2162 }
2163
2164 return false;
2165 }
2166
/* HCI request completion callback for remove_uuid(). */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2173
/* Remove UUID command handler.
 *
 * Removes one UUID (all matching entries) from hdev->uuids, or all of
 * them when the all-zero wildcard UUID is given, and then schedules a
 * class/EIR update. With the wildcard, the service cache may be armed
 * instead to batch a subsequent series of Add UUID calls.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a "remove everything" wildcard */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing operation may run at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was armed, the controller
		 * update is deferred until the cache times out, so the
		 * command can complete right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: nothing queued for the controller, reply now */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2252
/* HCI request completion callback for set_dev_class(). */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2259
/* Set Device Class command handler (BR/EDR only).
 *
 * Validates that the reserved bits of major/minor are clear, stores the
 * new class and, when powered, pushes it (plus possibly refreshed EIR
 * data if the service cache was active) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing operation may run at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Lower two bits of minor and upper three bits of major are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off, only remember the values for later */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock while waiting for the cache work item to
		 * finish, since that work may itself take hdev's lock.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: nothing queued for the controller, reply now */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2330
/* Load Link Keys command handler (BR/EDR only).
 *
 * Validates the whole key list up front, then atomically replaces the
 * stored link keys: the existing set is cleared and every acceptable
 * key from the request is added. Blocked and debug-combination keys are
 * skipped individually.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap so that struct_size() below cannot overflow u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match key_count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before any state is modified, so a bad
	 * request does not clear the existing keys.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the block list are skipped, not rejected */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2419
/* Broadcast a Device Unpaired event to all mgmt sockets except
 * @skip_sk (typically the socket that issued the Unpair command).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	ev.addr.type = addr_type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2431
/* Unpair Device command handler.
 *
 * Removes the stored keys for the given address (link key for BR/EDR,
 * LTK/IRK for LE) and optionally terminates an existing connection. The
 * command completes immediately when no link needs to be torn down;
 * otherwise it stays pending until the disconnection finishes.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* Responses always echo the target address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored link key means the device was never
			 * paired in the first place.
			 */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: the connection parameters can be
		 * removed right away.
		 */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2559
/* Disconnect command handler.
 *
 * Looks up the ACL or LE connection for the given address and initiates
 * its termination. The command stays pending; generic_cmd_complete
 * finishes it once the disconnection event arrives.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Responses always echo the target address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect may be in flight at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED entries have no established link to drop */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2625
/* Translate an HCI link type plus LE address type into the mgmt
 * BDADDR_* address type used on the Management interface.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	/* Fallback to BR/EDR type for anything that is not LE */
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return BDADDR_LE_PUBLIC;

	/* Fallback to LE Random address type */
	return BDADDR_LE_RANDOM;
}
2644
/* Get Connections command handler.
 *
 * Returns the addresses of all mgmt-visible connections. A first pass
 * over the connection hash counts entries to size the allocation, a
 * second pass fills it in; SCO/eSCO links are written but then excluded
 * from the count (the next entry overwrites the slot), so they never
 * appear in the reply.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the response buffer */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in addresses, filtering out SCO/eSCO */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2698
/* Queue an HCI PIN Code Negative Reply and keep the mgmt command
 * pending until the controller answers; addr_cmd_complete finishes it.
 * Must be called with hdev locked.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2719
/* PIN Code Reply command handler.
 *
 * Forwards the user-supplied PIN to the controller. If the pending
 * security level requires a 16-byte PIN and a shorter one was given,
 * a negative reply is sent to the controller instead and the command
 * fails with Invalid Params.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte (fixed) PIN */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		/* Reject towards the controller, then fail the command;
		 * a failure from the negative reply takes precedence.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2781
/* Set IO Capability command handler: stores the IO capability used for
 * future pairing attempts. Always completes synchronously.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Values beyond KeyboardDisplay are not defined by SMP */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = cp->io_capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2804
find_pairing(struct hci_conn * conn)2805 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2806 {
2807 struct hci_dev *hdev = conn->hdev;
2808 struct mgmt_pending_cmd *cmd;
2809
2810 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2811 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2812 continue;
2813
2814 if (cmd->user_data != conn)
2815 continue;
2816
2817 return cmd;
2818 }
2819
2820 return NULL;
2821 }
2822
/* Finish a pending Pair Device command.
 *
 * Sends the final reply, detaches the pairing callbacks from the
 * connection, and releases both references the command held on it (the
 * hci_conn_drop from the connect call and the hci_conn_get stored in
 * cmd->user_data).
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
2851
/* Notification from the SMP layer that pairing on @conn finished;
 * completes the matching Pair Device command, if one is pending.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
2863
/* BR/EDR connection callback: completes the pending Pair Device
 * command with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	} else {
		BT_DBG("Unable to find a pending command");
	}
}
2879
/* LE connection callback: only errors complete the Pair Device
 * command here; success is reported through mgmt_smp_complete().
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	} else {
		BT_DBG("Unable to find a pending command");
	}
}
2898
/* Pair Device command handler.
 *
 * Creates a BR/EDR (ACL) or LE connection to the target, installs the
 * pairing callbacks on it and leaves the command pending until pairing
 * completes. The reply always echoes the target address.
 *
 * Fix: hci_conn_params_add() can fail (allocation); its result was
 * dereferenced unconditionally, causing a NULL pointer dereference.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			/* Allocation failed; bail out instead of
			 * dereferencing a NULL parameter entry.
			 */
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error onto a mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* Callbacks already installed means another pairing owns conn */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* The command keeps its own reference on the connection */
	cmd->user_data = hci_conn_get(conn);

	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		/* Already connected and secure: complete right away */
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3029
/* Cancel Pair Device command handler.
 *
 * Completes the pending Pair Device command with Cancelled, removes any
 * keys created so far and aborts the link if it only existed for the
 * pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same address being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* NOTE(review): cmd_complete here is pairing_complete(), which
	 * drops the command's references on conn; conn is still used
	 * below — presumably kept alive by the connection itself while
	 * hdev is locked. Confirm against hci_conn lifetime rules.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3086
/* Common helper for the user confirmation/passkey (negative) replies.
 *
 * LE replies are handed to SMP and complete synchronously; BR/EDR
 * replies are forwarded to the controller as the given HCI opcode with
 * a pending mgmt command finished by addr_cmd_complete. @passkey is
 * only used for HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing replies go through SMP, not the controller */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3157
/* PIN Code Negative Reply: thin wrapper around user_pairing_resp(). */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* No passkey is involved in a negative PIN reply */
	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3169
/* User Confirmation Reply: validates the fixed-size payload and hands
 * off to user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* This command has no variable-length payload */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3185
/* User Confirmation Negative Reply: thin wrapper, no passkey. */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3197
/* User Passkey Reply: forwards the user-entered passkey. */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3209
/* User Passkey Negative Reply: thin wrapper, no passkey. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3221
/* Expire the currently active advertising instance if it advertises any
 * of the data selected by @flags (e.g. MGMT_ADV_FLAG_LOCAL_NAME or
 * MGMT_ADV_FLAG_APPEARANCE), then schedule the next instance so the
 * changed data is re-advertised. Called with hdev locked by its callers
 * (set_name_complete, set_appearance) — NOTE(review): assumed from the
 * call sites visible here; confirm against the full file.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	/* Fire-and-forget: no completion callback is needed here. */
	hci_req_run(&req, NULL);
}
3250
/* HCI request completion callback for set_local_name().
 *
 * Resolves the pending MGMT_OP_SET_LOCAL_NAME command: on failure it
 * reports the translated HCI status; on success it completes with the
 * accepted parameters and, if LE advertising is active, expires any
 * advertising instance carrying the local name so that it is refreshed.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed). */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3282
/* MGMT_OP_SET_LOCAL_NAME handler: update the controller's complete and
 * short local name.
 *
 * If the controller is powered, the name is pushed to the controller
 * (device name + EIR for BR/EDR, scan response data for LE) and the
 * command completes asynchronously via set_name_complete(). When not
 * powered, the names are only stored locally and the command completes
 * immediately, emitting Local Name Changed to other sockets.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		/* Notify other mgmt sockets (the issuer is skipped). */
		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3352
/* MGMT_OP_SET_APPEARANCE handler: set the LE appearance value.
 *
 * Only valid for LE-capable controllers. On an actual change, any active
 * advertising instance carrying the appearance is expired so it gets
 * re-advertised, and Extended Info Changed is emitted to other sockets.
 * Always replies with command complete (the value is stored regardless
 * of power state).
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3386
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report the supported, selected
 * and configurable PHYs of the controller.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_confguration rp = { 0 };

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Snapshot the PHY state under the device lock. */
	hci_dev_lock(hdev);
	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3407
mgmt_phy_configuration_changed(struct hci_dev * hdev,struct sock * skip)3408 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3409 {
3410 struct mgmt_ev_phy_configuration_changed ev;
3411
3412 memset(&ev, 0, sizeof(ev));
3413
3414 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3415
3416 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3417 sizeof(ev), skip);
3418 }
3419
/* HCI request completion callback for set_phy_configuration().
 *
 * Resolves the pending MGMT_OP_SET_PHY_CONFIGURATION command: reports a
 * translated error status on failure, or completes the command and
 * broadcasts PHY Configuration Changed (skipping the issuer) on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed). */
	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3450
/* MGMT_OP_SET_PHY_CONFIGURATION handler: select the PHYs the controller
 * should use.
 *
 * BR/EDR PHY bits are applied synchronously by translating them into the
 * ACL packet-type bitmap (hdev->pkt_type); LE PHY bits require an
 * HCI LE Set Default PHY command, which completes asynchronously via
 * set_default_phy_complete(). Selected PHYs must be a superset of the
 * non-configurable (always-on) PHYs and a subset of the supported ones.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_confguration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);	/* BR 1M 1-slot is always on */
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject selection of PHYs the controller does not support. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs cannot be deselected. */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No change requested: complete immediately. */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map BR/EDR multi-slot PHY bits to ACL packet types. Basic rate
	 * packet types are enabled by selecting them; EDR packet types
	 * use inverted logic (bit set in pkt_type *disables* the type).
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only BR/EDR PHYs changed, no HCI command is needed: notify
	 * (if anything changed) and complete synchronously.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller "no preference" for TX/RX. */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3605
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the list of blocked keys.
 *
 * The existing list is always cleared first; the supplied keys are then
 * added one by one. Note that @err carries an MGMT status code (not a
 * negative errno) and the command always completes, possibly with
 * MGMT_STATUS_NO_RESOURCES if an allocation failed part-way (keys added
 * before the failure remain in effect).
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	/* Use the CPU-endian key_count here; keys->key_count is __le16 and
	 * comparing it against a native int is wrong on big-endian hosts.
	 */
	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}
3654
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle wideband speech support.
 *
 * Only allowed on controllers advertising the quirk, and only when the
 * requested value already matches the active state if the controller is
 * powered (changing it at runtime is rejected). Completes with a
 * settings response and emits New Settings if the flag actually changed.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean on/off values are valid. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* While powered, the setting cannot be flipped. */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3710
/* MGMT_OP_READ_SECURITY_INFO handler: report security capabilities as a
 * sequence of EIR-style TLV fields (0x01 flags, 0x02/0x03 max encryption
 * key sizes).
 *
 * NOTE(review): buf[16] must hold sizeof(*rp) plus all appended fields;
 * with the fields below that appears to be at most 2 + 3 + 4 + 4 = 13
 * bytes — confirm if new fields are ever added here.
 */
static int read_security_info(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	char buf[16];
	struct mgmt_rp_read_security_info *rp = (void *)buf;
	u16 sec_len = 0;
	u8 flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 */
	if (hdev->commands[41] & 0x08)
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
					  hdev->max_enc_key_size);

	sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);

	rp->sec_len = cpu_to_le16(sec_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
				 rp, sizeof(*rp) + sec_len);
}
3759
3760 #ifdef CONFIG_BT_FEATURE_DEBUG
3761 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3762 static const u8 debug_uuid[16] = {
3763 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3764 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3765 };
3766 #endif
3767
3768 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3769 static const u8 simult_central_periph_uuid[16] = {
3770 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3771 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3772 };
3773
3774 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3775 static const u8 rpa_resolution_uuid[16] = {
3776 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3777 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
3778 };
3779
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: list experimental features and
 * their flags (BIT(0) = enabled, BIT(1) = changing it triggers a
 * supported-settings change).
 *
 * With a NULL @hdev (non-controller index) only the debug feature is
 * reported; with a controller, the simultaneous central/peripheral and
 * LL privacy features are reported as applicable. buf[62] fits the
 * 2-byte count plus up to three 20-byte (16 UUID + 4 flags) entries.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[62];	/* Enough space for 3 features */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		/* Enabled only when the controller reports valid LE states
		 * and supports being central and peripheral simultaneously.
		 */
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && use_ll_privacy(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3838
/* Emit Experimental Feature Changed for the LL privacy feature to all
 * sockets that opted in via HCI_MGMT_EXP_FEATURE_EVENTS, except @skip.
 * BIT(1) signals that toggling also changes the supported settings.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = BIT(1);

	if (enabled)
		flags |= BIT(0);

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
3853
3854 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Emit Experimental Feature Changed for the debug feature (global, no
 * controller index) to opted-in sockets except @skip.
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = enabled ? BIT(0) : 0;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
3867 #endif
3868
/* MGMT_OP_SET_EXP_FEATURE handler: enable/disable an experimental
 * feature identified by UUID.
 *
 * A zero UUID disables all experimental features. The debug feature is
 * only valid without a controller index; the LL privacy (RPA
 * resolution) feature requires a powered-down controller. Unknown UUIDs
 * are rejected with NOT_SUPPORTED.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	struct mgmt_rp_set_exp_feature rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Zero UUID: disable everything that is currently enabled. */
	if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
		memset(rp.uuid, 0, 16);
		rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
		if (!hdev) {
			bool changed = bt_dbg_get();

			bt_dbg_set(false);

			if (changed)
				exp_debug_feature_changed(false, sk);
		}
#endif

		if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
			bool changed = hci_dev_test_flag(hdev,
							 HCI_ENABLE_LL_PRIVACY);

			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			if (changed)
				exp_ll_privacy_feature_changed(false, hdev, sk);
		}

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
					 MGMT_OP_SET_EXP_FEATURE, 0,
					 &rp, sizeof(rp));
	}

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!memcmp(cp->uuid, debug_uuid, 16)) {
		bool val, changed;
		int err;

		/* Command requires to use the non-controller index */
		if (hdev)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];
		changed = val ? !bt_dbg_get() : bt_dbg_get();
		bt_dbg_set(val);

		memcpy(rp.uuid, debug_uuid, 16);
		rp.flags = cpu_to_le32(val ? BIT(0) : 0);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_debug_feature_changed(val, sk);

		return err;
	}
#endif

	if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
		bool val, changed;
		int err;
		u32 flags;

		/* Command requires to use the controller index */
		if (!hdev)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Changes can only be made when controller is powered down */
		if (hdev_is_powered(hdev))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_NOT_POWERED);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];

		if (val) {
			changed = !hci_dev_test_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
			hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);

			/* Enable LL privacy + supported settings changed */
			flags = BIT(0) | BIT(1);
		} else {
			changed = hci_dev_test_flag(hdev,
						    HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			/* Disable LL privacy + supported settings changed */
			flags = BIT(1);
		}

		memcpy(rp.uuid, rpa_resolution_uuid, 16);
		rp.flags = cpu_to_le32(flags);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_ll_privacy_feature_changed(val, hdev, sk);

		return err;
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4019
4020 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4021
/* MGMT_OP_GET_DEVICE_FLAGS handler: report supported and current flags
 * for a device on the whitelist (BR/EDR) or connection-parameter list
 * (LE). Unknown devices complete with INVALID_PARAMS.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	/* No trailing "\n": bt_dev_dbg() appends a newline itself. */
	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	/* Zero the reply up front so the error path doesn't leak
	 * uninitialized stack memory to userspace.
	 */
	memset(&rp, 0, sizeof(rp));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->current_flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		if (!params)
			goto done;

		current_flags = params->current_flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}
4069
/* Emit Device Flags Changed to all mgmt sockets except the issuer @sk. */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	ev.addr.type = bdaddr_type;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
4083
/* MGMT_OP_SET_DEVICE_FLAGS handler: update the current flags for a
 * device on the whitelist (BR/EDR) or connection-parameter list (LE).
 * On success, Device Flags Changed is emitted to other sockets.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	/* current_flags is already CPU-endian here; converting it again
	 * with __le32_to_cpu() would print garbage on big-endian hosts.
	 */
	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type,
		   current_flags);

	/* Reject any flag bits outside the supported set. */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->current_flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (params) {
			params->current_flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
				    &cp->addr.bdaddr,
				    le_addr_type(cp->addr.type));
		}
	}

done:
	hci_dev_unlock(hdev);

	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
4141
/* Emit Advertisement Monitor Added for @handle, skipping the issuer. */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
4151
/* Emit Advertisement Monitor Removed for @handle, skipping the issuer.
 *
 * Use the _removed event structure: the original used
 * mgmt_ev_adv_monitor_added, which only worked because both structs
 * happen to share the same layout.
 */
static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
				     u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
}
4161
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported monitor
 * features, limits, and the handles of all registered monitors.
 *
 * Handles are snapshotted under the device lock into a fixed array;
 * assumes the IDR never holds more than
 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries (enforced at add time) —
 * NOTE(review): confirm against hci_add_adv_monitor().
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
		handles[num_handles++] = monitor->handle;
	}

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* Once controller-based monitoring is in place, the enabled_features
	 * should reflect the use.
	 */
	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = 0;
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4210
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler: register an advertisement
 * monitor built from the supplied patterns.
 *
 * On success ownership of the monitor passes to the hdev (freed by the
 * adv-monitor core); on any failure it is freed here via the 'failed'
 * label (hci_free_adv_monitor() tolerates NULL).
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct adv_monitor *m = NULL;
	struct adv_pattern *p = NULL;
	unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
	__u8 cp_ofst = 0, cp_len = 0;
	int err, i;

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp) || cp->pattern_count == 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				      MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	/* Reject commands whose length doesn't cover pattern_count
	 * entries; without this check the copy loop below could read
	 * past the end of the received buffer.
	 */
	if (struct_size(cp, patterns, cp->pattern_count) != len) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				      MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	m = kmalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		err = -ENOMEM;
		goto failed;
	}

	INIT_LIST_HEAD(&m->patterns);
	m->active = false;

	for (i = 0; i < cp->pattern_count; i++) {
		if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					      MGMT_STATUS_INVALID_PARAMS);
			goto failed;
		}

		/* Each pattern must fit within one AD structure. */
		cp_ofst = cp->patterns[i].offset;
		cp_len = cp->patterns[i].length;
		if (cp_ofst >= HCI_MAX_AD_LENGTH ||
		    cp_len > HCI_MAX_AD_LENGTH ||
		    (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					      MGMT_STATUS_INVALID_PARAMS);
			goto failed;
		}

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p) {
			err = -ENOMEM;
			goto failed;
		}

		p->ad_type = cp->patterns[i].ad_type;
		p->offset = cp->patterns[i].offset;
		p->length = cp->patterns[i].length;
		memcpy(p->value, cp->patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	if (mp_cnt != cp->pattern_count) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				      MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	hci_dev_lock(hdev);

	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;

	err = hci_add_adv_monitor(hdev, m);
	if (err) {
		if (err == -ENOSPC) {
			mgmt_cmd_status(sk, hdev->id,
					MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					MGMT_STATUS_NO_RESOURCES);
		}
		goto unlock;
	}

	if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
		mgmt_adv_monitor_added(sk, hdev, m->handle);

	hci_dev_unlock(hdev);

	rp.monitor_handle = cpu_to_le16(m->handle);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

unlock:
	hci_dev_unlock(hdev);

failed:
	hci_free_adv_monitor(m);
	return err;
}
4312
/* MGMT_OP_REMOVE_ADV_MONITOR handler: remove the advertisement monitor
 * identified by cp->monitor_handle and echo the handle back on success.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	unsigned int prev_adv_monitors_cnt;
	u16 handle;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	handle = __le16_to_cpu(cp->monitor_handle);
	/* Snapshot so the "monitor removed" event is only sent when the
	 * count actually decreased.
	 */
	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;

	err = hci_remove_adv_monitor(hdev, handle);
	if (err == -ENOENT) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				      MGMT_STATUS_INVALID_INDEX);
		goto unlock;
	}

	/* NOTE(review): errors other than -ENOENT fall through and are
	 * reported as success - confirm hci_remove_adv_monitor() cannot
	 * fail any other way here.
	 */
	if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
		mgmt_adv_monitor_removed(sk, hdev, handle);

	hci_dev_unlock(hdev);

	/* Handle is echoed back still in little-endian wire format. */
	rp.monitor_handle = cp->monitor_handle;

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4350
/* Completion callback for the HCI Read Local OOB (Extended) Data request
 * issued by read_local_oob_data().  Translates the controller reply into
 * the MGMT_OP_READ_LOCAL_OOB_DATA response and drops the pending command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	/* Nothing to do if the pending command was already removed. */
	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: only a P-192 hash/randomizer pair. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Shrink the mgmt reply: no 256-bit values to report. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply carries both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4409
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: queue the appropriate HCI read
 * command (extended variant when BR/EDR Secure Connections is enabled);
 * the reply is delivered asynchronously via
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one outstanding request of this type at a time. */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	/* On submission failure drop the pending entry ourselves; on
	 * success the completion callback removes it.
	 */
	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4460
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.  Two command sizes are accepted:
 * the legacy form with only P-192 values, and the extended form with
 * both P-192 and P-256 values.  All-zero key/randomizer pairs mean
 * "no OOB data" for that curve.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy form is BR/EDR only. */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Any other length is a malformed command. */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4568
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: remove OOB data for a single
 * BR/EDR address, or clear all stored entries when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* BDADDR_ANY acts as a wildcard: wipe the whole OOB store. */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
4605
/* Called by the HCI core when a discovery start attempt has finished.
 * Completes whichever of the three start-discovery command variants is
 * pending and wakes the suspend machinery if discovery was being
 * unpaused.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* Only one of the three variants can be pending at a time. */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4635
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)4636 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4637 uint8_t *mgmt_status)
4638 {
4639 switch (type) {
4640 case DISCOV_TYPE_LE:
4641 *mgmt_status = mgmt_le_support(hdev);
4642 if (*mgmt_status)
4643 return false;
4644 break;
4645 case DISCOV_TYPE_INTERLEAVED:
4646 *mgmt_status = mgmt_le_support(hdev);
4647 if (*mgmt_status)
4648 return false;
4649 fallthrough;
4650 case DISCOV_TYPE_BREDR:
4651 *mgmt_status = mgmt_bredr_support(hdev);
4652 if (*mgmt_status)
4653 return false;
4654 break;
4655 default:
4656 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4657 return false;
4658 }
4659
4660 return true;
4661 }
4662
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY.  Validates preconditions, records the
 * discovery parameters and kicks the discov_update work; the command is
 * completed asynchronously via mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already in progress (or being set up),
	 * or if periodic inquiry owns the controller.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4730
/* MGMT_OP_START_DISCOVERY handler: thin wrapper around the shared
 * implementation with the plain (non-limited) opcode.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
4737
/* MGMT_OP_START_LIMITED_DISCOVERY handler: thin wrapper around the
 * shared implementation with the limited-discovery opcode.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
4745
/* Completion helper for Start Service Discovery: reply with only the
 * first byte of the stored command parameters (the discovery type).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4752
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * result filtering by RSSI threshold and an optional list of 128-bit
 * service UUIDs appended to the command.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command length must exactly match header + UUID list. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4853
/* Called by the HCI core when a discovery stop attempt has finished.
 * Completes any pending Stop Discovery command and wakes the suspend
 * machinery if discovery was being paused for suspend.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4876
/* MGMT_OP_STOP_DISCOVERY handler: request that the ongoing discovery of
 * the given type be stopped.  Completion is asynchronous via
 * mgmt_stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery actually running. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4918
/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether the name of a
 * discovered device is already known, so the inquiry cache can skip or
 * schedule remote name resolution for it.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	/* Only entries still waiting for name resolution are eligible. */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name known: drop from the resolve list entirely. */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Name needed: reposition in the resolve queue. */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
4960
/* MGMT_OP_BLOCK_DEVICE handler: add the given address to the reject
 * list and broadcast a Device Blocked event to other mgmt sockets.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 rsp_status = MGMT_STATUS_FAILED;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err >= 0) {
		/* Only announce the block when the list was updated. */
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		rsp_status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
				rsp_status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
4996
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the given address from the
 * reject list and broadcast a Device Unblocked event to other sockets.
 * An address not present in the list yields Invalid Params.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 rsp_status = MGMT_STATUS_INVALID_PARAMS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err >= 0) {
		/* Only announce the unblock when an entry was removed. */
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		rsp_status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
				rsp_status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5032
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record fields and
 * refresh the EIR data so the new identity is advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	__u16 source = __le16_to_cpu(cp->source);
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Source values above 0x0002 are rejected. */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Push the updated Device ID into the EIR data. */
	hci_req_init(&req, hdev);
	__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
5067
/* Request-completion callback used when re-enabling instance
 * advertising; only logs the status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %d", status);
}
5073
/* Completion handler for the HCI request built by set_advertising().
 * Syncs the HCI_ADVERTISING flag with the controller state, answers all
 * pending Set Advertising commands, handles the suspend notifier and,
 * when the setting was just turned off, re-schedules any configured
 * advertising instances.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's actual advertising state. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* Fall back to the first configured instance when none is current. */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5147
/* MGMT_OP_SET_ADVERTISING handler.  cp->val: 0x00 = off, 0x01 = on,
 * 0x02 = on and connectable.  Either updates the flags directly and
 * replies, or builds an HCI request completed asynchronously by
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings when a flag actually flipped. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5266
/* MGMT_OP_SET_STATIC_ADDRESS handler: configure the LE static random
 * address.  Only allowed while powered off; BDADDR_ANY clears it.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* Non-ANY addresses must be well-formed static addresses. */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5310
/* MGMT_OP_SET_SCAN_PARAMS handler: set the LE scan interval and window
 * (both range-checked, window must not exceed interval) and restart the
 * background scan so the new values take effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	/* Accepted range is 0x0004 - 0x4000 for both parameters. */
	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req, false);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
5367
/* Completion handler for the HCI request built by set_fast_connectable().
 * Commits the HCI_FAST_CONNECTABLE flag on success and completes the
 * pending mgmt command either way.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
	} else {
		/* Requested mode was stored with the pending command. */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5401
/* MGMT_OP_SET_FAST_CONNECTABLE handler.  Requires BR/EDR to be enabled
 * and a controller of at least Bluetooth 1.2 (page scan parameters are
 * not configurable before that).  When powered, the change is applied
 * via an HCI request and finalized in fast_connectable_complete();
 * when powered off only the flag is toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable may be in flight at a time */
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested state already in effect: just reply with settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: toggle the flag only, no HCI traffic possible */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5466
/* HCI request completion callback for Set BREDR.  set_bredr() sets
 * HCI_BREDR_ENABLED optimistically before issuing the request, so on
 * failure the flag must be rolled back here.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5498
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * controller.  Disabling while powered is rejected, as is re-enabling
 * when the device operates LE-only with a static address or with
 * secure connections enabled (see comment below).  While powered the
 * actual switch is carried out via an HCI request finalized in
 * set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only meaningful on dual-mode (BR/EDR + LE) controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just reply with the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 * set_bredr_complete() restores it if the request fails.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5610
/* HCI request completion callback for Set Secure Connections: maps the
 * requested mode (0x00 off, 0x01 enabled, 0x02 SC-only) onto the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags and notifies userspace.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5655
/* MGMT_OP_SET_SECURE_CONN handler.  val may be 0x00 (off), 0x01 (SC
 * enabled) or 0x02 (SC only).  If the controller is unpowered, lacks SC
 * support, or BR/EDR is disabled, only the host-side flags are changed;
 * otherwise HCI_OP_WRITE_SC_SUPPORT is issued and the flags are updated
 * in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC must be supported either by the controller (BR/EDR) or by
	 * the host stack (LE).
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR active, controller-side SC requires SSP */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: no HCI command can or needs to be sent */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		/* Broadcast new settings only if something changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state (both enabled and only bits) */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5743
/* MGMT_OP_SET_DEBUG_KEYS handler.  val: 0x00 discard debug keys, 0x01
 * keep them (HCI_KEEP_DEBUG_KEYS), 0x02 additionally use debug keys for
 * pairing (HCI_USE_DEBUG_KEYS).  When powered with SSP enabled, a change
 * of the "use" state is also pushed to the controller via
 * HCI_OP_WRITE_SSP_DEBUG_MODE.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Result of hci_send_cmd() is deliberately ignored: this is a
	 * best-effort sync of the controller debug mode.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5790
/* MGMT_OP_SET_PRIVACY handler.  privacy: 0x00 off, 0x01 on, 0x02
 * limited privacy.  Only allowed while powered off; stores the supplied
 * IRK and toggles HCI_PRIVACY/HCI_LIMITED_PRIVACY accordingly.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Changing privacy/IRK while powered is not supported */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5847
irk_is_valid(struct mgmt_irk_info * irk)5848 static bool irk_is_valid(struct mgmt_irk_info *irk)
5849 {
5850 switch (irk->addr.type) {
5851 case BDADDR_LE_PUBLIC:
5852 return true;
5853
5854 case BDADDR_LE_RANDOM:
5855 /* Two most significant bits shall be set */
5856 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5857 return false;
5858 return true;
5859 }
5860
5861 return false;
5862 }
5863
/* MGMT_OP_LOAD_IRKS handler: validate the variable-length IRK list,
 * then atomically (under hdev->lock) replace all stored SMP IRKs with
 * the supplied set, skipping any key on the blocked-key list.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps sizeof(*cp) + count * entry within U16_MAX */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before touching any stored state */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Full replace: drop all existing IRKs first */
	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* Userspace provided IRKs, so it handles RPA resolution too */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5934
ltk_is_valid(struct mgmt_ltk_info * key)5935 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5936 {
5937 if (key->master != 0x00 && key->master != 0x01)
5938 return false;
5939
5940 switch (key->addr.type) {
5941 case BDADDR_LE_PUBLIC:
5942 return true;
5943
5944 case BDADDR_LE_RANDOM:
5945 /* Two most significant bits shall be set */
5946 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5947 return false;
5948 return true;
5949 }
5950
5951 return false;
5952 }
5953
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: validate the variable-length LTK
 * list, then atomically (under hdev->lock) replace all stored SMP LTKs
 * with the supplied set.  Blocked keys and debug keys are skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps sizeof(*cp) + count * entry within U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry before touching any stored state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Full replace: drop all existing LTKs first */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Intentional: debug keys are never persisted,
			 * so fall into default and skip this entry.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6049
/* Completion handler for a pending Get Conn Info command: build the
 * reply from the cached connection values (or invalid markers on
 * failure) and release the connection references taken when the
 * pending command was created (hci_conn_hold + hci_conn_get).
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* cmd->param starts with the mgmt_addr_info the request used */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Balance the hold/get done in get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
6076
/* HCI request completion callback for the Read RSSI / Read TX Power
 * request issued by get_conn_info(): locate the connection via the
 * handle of the last sent command and complete the matching pending
 * mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command found in sent_cmd: nothing we can resolve */
	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* The pending command is keyed by the connection it refers to */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6130
/* MGMT_OP_GET_CONN_INFO handler: report RSSI and TX power for an active
 * connection.  Fresh-enough cached values are returned directly;
 * otherwise a Read RSSI (+ Read TX Power as needed) request is issued
 * and the reply is deferred to conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the queried address in every reply, success or error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Conn Info per connection may be in flight */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References released in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6251
/* Completion handler for a pending Get Clock Info command: fill in the
 * local clock and, when a connection was queried, the piconet clock and
 * accuracy, then release the connection references taken earlier.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* cmd->param starts with the mgmt_addr_info the request used */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	/* On failure, reply with zeroed clock values */
	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	/* conn is NULL when only the local clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Balance the hold/get done in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
6287
/* HCI request completion callback for Read Clock: determine whether the
 * last sent command targeted a connection (which == piconet clock) or
 * the local clock, then complete the matching pending mgmt command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a connection was read */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command is keyed by the connection (or NULL) */
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6319
/* MGMT_OP_GET_CLOCK_INFO handler: read the local clock and, when a
 * non-ANY BR/EDR address is given and connected, also the piconet clock
 * of that connection.  The reply is deferred to
 * get_clock_info_complete() / clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the queried address in every reply, success or error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only available for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is requested */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command: local clock (which = 0x00, handle = 0) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References released in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6395
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)6396 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6397 {
6398 struct hci_conn *conn;
6399
6400 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6401 if (!conn)
6402 return false;
6403
6404 if (conn->dst_type != type)
6405 return false;
6406
6407 if (conn->state != BT_CONNECTED)
6408 return false;
6409
6410 return true;
6411 }
6412
/* This function requires the caller holds hdev->lock */
/* Create (or look up) connection parameters for the given address and
 * set their auto-connect mode, moving the entry onto the matching
 * action list (pend_le_conns or pend_le_reports).  Returns 0 on
 * success, -EIO if the parameter entry could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whatever action list the entry is currently on */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Queue for connection only if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6457
/* Broadcast a Device Added mgmt event to all sockets except the one
 * that issued the Add Device command.
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	ev.action = action;
	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
6469
/* Handler for the Add Device mgmt command: put a BR/EDR device on the
 * whitelist or register LE connection parameters with the requested
 * auto-connect policy. Responds with the device address on all paths.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject invalid address types and the wildcard BDADDR_ANY */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Action: 0x00 background scan (report), 0x01 allow incoming /
	 * direct connect, 0x02 auto-connect. Everything else is invalid.
	 */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Whitelist changed, so the page scan state may need
		 * updating.
		 */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map mgmt action to the kernel auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Fetch the flags for the Device Flags Changed event */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6567
/* Broadcast a Device Removed event for the given address; sk is the
 * originating mgmt socket, passed through to mgmt_event().
 */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
6578
/* Handler for the Remove Device mgmt command: with a concrete address,
 * remove that single device (whitelist entry for BR/EDR, conn_params
 * for LE); with BDADDR_ANY, remove all whitelist entries and all
 * removable LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* A specific address was given: remove just that device */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			/* BR/EDR devices are tracked on the whitelist only */
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Refuse to remove entries in the DISABLED or EXPLICIT
		 * state; those were not established via Add Device.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove everything. Address type must be 0. */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with an in-progress explicit connect
			 * alive, demoted to the EXPLICIT state.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6707
/* Handler for the Load Connection Parameters mgmt command: replace the
 * stored LE connection parameters with the supplied list. Individual
 * invalid entries are logged and skipped rather than failing the whole
 * command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest param_count whose total payload still fits in a u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Drop all disabled entries before loading the new set */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE public/random identity addresses are accepted */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		/* Validate against the spec-defined value ranges */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6792
/* Handler for the Set External Configuration mgmt command: toggle the
 * HCI_EXT_CONFIGURED flag and, if that changes the overall configured
 * state of the controller, move it between the configured and
 * unconfigured index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The option can only be changed while the controller is off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	/* Only 0x00 (disable) and 0x01 (enable) are valid values */
	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The driver must declare support for external configuration */
	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only if the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* When the HCI_UNCONFIGURED flag no longer matches the actual
	 * configuration state, the controller changed lists: re-announce
	 * it under its new identity.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: run the power-on sequence so
			 * the index is re-added as a configured controller.
			 */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose it as a raw device */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6848
/* Handler for the Set Public Address mgmt command: store the address to
 * be programmed via the driver's set_bdaddr callback and, if this makes
 * an unconfigured controller fully configured, kick off power-on so it
 * is re-announced as a configured index.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The address can only be set while the controller is off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is not a usable public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The driver must provide a way to program the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	/* Having a public address clears the corresponding missing
	 * option, so notify listeners of the new option bits.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Controller is now fully configured: re-announce it and
		 * run the power-on sequence.
		 */
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6900
/* Completion callback for the HCI OOB data read issued by
 * read_local_ssp_oob_req(): package the hashes/randomizers into EIR
 * fields, complete the pending mgmt command and emit the Local OOB
 * Data Updated event.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* HCI-level failure: respond with no EIR payload */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: only the P-192 hash/randomizer pair */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* Class of Device (5) + hash (18) + rand (18) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-256 pair, plus P-192 unless the
		 * controller is in Secure Connections Only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure skip EIR assembly; eir_len is already 0 */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Notify all other sockets that opted into OOB data events */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7011
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)7012 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7013 struct mgmt_cp_read_local_oob_ext_data *cp)
7014 {
7015 struct mgmt_pending_cmd *cmd;
7016 struct hci_request req;
7017 int err;
7018
7019 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7020 cp, sizeof(*cp));
7021 if (!cmd)
7022 return -ENOMEM;
7023
7024 hci_req_init(&req, hdev);
7025
7026 if (bredr_sc_enabled(hdev))
7027 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7028 else
7029 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7030
7031 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7032 if (err < 0) {
7033 mgmt_pending_remove(cmd);
7034 return err;
7035 }
7036
7037 return 0;
7038 }
7039
/* Handler for the Read Local OOB Extended Data mgmt command. For BR/EDR
 * with SSP enabled the data comes from the controller asynchronously
 * (read_local_ssp_oob_req); for LE the response is assembled inline
 * from the address, role, SC confirm/random values and flags.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev)) {
		/* Pre-compute the worst-case EIR length per address type
		 * so the response buffer can be sized up front.
		 */
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Reset and rebuild the actual EIR payload */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* OOB data must be read from the controller; the
			 * mgmt command completes asynchronously.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			/* Without SSP only the Class of Device is sent */
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick static random vs public address; addr[6] carries
		 * the LE address type (0x01 = random, 0x00 = public).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Notify all other sockets that opted into OOB data events */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7195
get_supported_adv_flags(struct hci_dev * hdev)7196 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7197 {
7198 u32 flags = 0;
7199
7200 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7201 flags |= MGMT_ADV_FLAG_DISCOV;
7202 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7203 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7204 flags |= MGMT_ADV_FLAG_APPEARANCE;
7205 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7206
7207 /* In extended adv TX_POWER returned from Set Adv Param
7208 * will be always valid.
7209 */
7210 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7211 ext_adv_capable(hdev))
7212 flags |= MGMT_ADV_FLAG_TX_POWER;
7213
7214 if (ext_adv_capable(hdev)) {
7215 flags |= MGMT_ADV_FLAG_SEC_1M;
7216 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7217 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7218
7219 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7220 flags |= MGMT_ADV_FLAG_SEC_2M;
7221
7222 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7223 flags |= MGMT_ADV_FLAG_SEC_CODED;
7224 }
7225
7226 return flags;
7227 }
7228
/* Handler for the Read Advertising Features mgmt command: report the
 * supported flags, size limits and the list of registered advertising
 * instance identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 *
	 * Note: the error must carry this command's own opcode
	 * (MGMT_OP_READ_ADV_FEATURES); replying with a different opcode
	 * breaks command/response matching in userspace.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered instance identifier */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7284
calculate_name_len(struct hci_dev * hdev)7285 static u8 calculate_name_len(struct hci_dev *hdev)
7286 {
7287 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7288
7289 return append_local_name(hdev, buf, 0);
7290 }
7291
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)7292 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7293 bool is_adv_data)
7294 {
7295 u8 max_len = HCI_MAX_AD_LENGTH;
7296
7297 if (is_adv_data) {
7298 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7299 MGMT_ADV_FLAG_LIMITED_DISCOV |
7300 MGMT_ADV_FLAG_MANAGED_FLAGS))
7301 max_len -= 3;
7302
7303 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7304 max_len -= 3;
7305 } else {
7306 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7307 max_len -= calculate_name_len(hdev);
7308
7309 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7310 max_len -= 4;
7311 }
7312
7313 return max_len;
7314 }
7315
flags_managed(u32 adv_flags)7316 static bool flags_managed(u32 adv_flags)
7317 {
7318 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7319 MGMT_ADV_FLAG_LIMITED_DISCOV |
7320 MGMT_ADV_FLAG_MANAGED_FLAGS);
7321 }
7322
tx_power_managed(u32 adv_flags)7323 static bool tx_power_managed(u32 adv_flags)
7324 {
7325 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7326 }
7327
name_managed(u32 adv_flags)7328 static bool name_managed(u32 adv_flags)
7329 {
7330 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7331 }
7332
appearance_managed(u32 adv_flags)7333 static bool appearance_managed(u32 adv_flags)
7334 {
7335 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7336 }
7337
/* Validate user-supplied advertising/scan-response TLV data: it must
 * fit within the space the kernel leaves available, be well-formed
 * (no field running past the buffer) and not contain AD types that the
 * kernel manages itself for the given flags.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		/* A zero-length field carries no type byte; skip it */
		if (!cur_len)
			continue;

		/* If the current field length would exceed the total data
		 * length, then it's invalid. This check must come first:
		 * it also guarantees that the type byte read below at
		 * data[i + 1] is within bounds (previously a length byte
		 * in the final position caused a one-byte out-of-bounds
		 * read).
		 */
		if (i + cur_len >= len)
			return false;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}
7379
/* Completion callback for the HCI request scheduled by add_advertising():
 * on failure, tear down every still-pending instance; then complete the
 * pending Add Advertising mgmt command if one exists.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		/* Success: the pending instance is now committed */
		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		/* Failure: remove the pending instance, cancelling its
		 * timeout first if it is the one currently advertised.
		 */
		instance = adv_instance->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7431
/* Handler for the Add Advertising mgmt command: validate the request,
 * register (or replace) the advertising instance and, when possible,
 * schedule it on the controller. Completes synchronously when no HCI
 * traffic is needed, otherwise via add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags, phy_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 *
	 * Note: the error must carry this command's own opcode
	 * (MGMT_OP_ADD_ADVERTISING); replying with a different opcode
	 * breaks command/response matching in userspace.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Total length must match the declared adv/scan-rsp data sizes */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags:
	 * phy_flags ^ (phy_flags & -phy_flags) is non-zero exactly when
	 * more than one secondary-PHY bit is set.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Only one advertising-related operation may be in flight */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7584
/* Completion callback for the HCI request issued by remove_advertising().
 *
 * Even if disabling advertising failed on the controller, the instance
 * itself has already been removed from the kernel's state, so any pending
 * Remove Advertising command is always completed with success.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (cmd) {
		cp = cmd->param;
		rp.instance = cp->instance;

		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
7614
/* Handler for MGMT_OP_REMOVE_ADVERTISING.
 *
 * Removes one advertising instance (cp->instance != 0) or all instances
 * (cp->instance == 0), disabling advertising on the controller when no
 * instance remains. Responds synchronously when there is nothing to send
 * to the controller, otherwise defers the response to
 * remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 *
	 * Fix: the status response must carry the opcode of the command
	 * actually being handled (MGMT_OP_REMOVE_ADVERTISING). The previous
	 * code replied with MGMT_OP_SET_ADVERTISING, so userspace waiting
	 * on Remove Advertising never saw its command complete.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_REMOVE_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* A non-zero instance identifier must refer to an existing instance */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while another command touching advertising or LE state is
	 * still in flight.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instance is registered at all */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* If we use extended advertising, instance is disabled and removed */
	if (ext_adv_capable(hdev)) {
		__hci_req_disable_ext_adv_instance(&req, cp->instance);
		__hci_req_remove_ext_adv_instance(&req, cp->instance);
	}

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7700
/* Handler for MGMT_OP_GET_ADV_SIZE_INFO.
 *
 * Reports the maximum advertising and scan-response data lengths that
 * are available for a given instance/flags combination.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Size information only makes sense on LE capable controllers */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance identifiers are 1-based and bounded by the number of
	 * advertising sets the controller supports.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags; reject anything outside of it.
	 */
	if (flags & ~get_supported_adv_flags(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
7739
/* Dispatch table for incoming management commands, indexed by opcode
 * (entry 0 is unused since opcode 0x0000 does not exist). Each entry
 * gives the handler, the fixed (or minimum, with HCI_MGMT_VAR_LEN)
 * parameter size, and optional flags: HCI_MGMT_NO_HDEV (no controller
 * index required), HCI_MGMT_UNTRUSTED (allowed for untrusted sockets),
 * HCI_MGMT_UNCONFIGURED (allowed on unconfigured controllers) and
 * HCI_MGMT_HDEV_OPTIONAL (controller index may be omitted).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_security_info,      MGMT_READ_SECURITY_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
};
7860
/* Announce a newly registered controller index to management listeners.
 *
 * Emits the legacy (un)configured index-added event followed by the
 * extended index-added event carrying controller type and bus.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed over the management interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured primary controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured primary controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		/* Unknown controller types are not announced at all */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7892
/* Announce removal of a controller index to management listeners.
 *
 * For primary controllers all still-pending commands are first failed
 * with MGMT_STATUS_INVALID_INDEX, then the legacy and extended
 * index-removed events are emitted.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed over the management interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Fail every pending command (opcode 0 matches all) */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured primary controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured primary controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7927
7928 /* This function requires the caller holds hdev->lock */
restart_le_actions(struct hci_dev * hdev)7929 static void restart_le_actions(struct hci_dev *hdev)
7930 {
7931 struct hci_conn_params *p;
7932
7933 list_for_each_entry(p, &hdev->le_conn_params, list) {
7934 /* Needed for AUTO_OFF case where might not "really"
7935 * have been powered off.
7936 */
7937 list_del_init(&p->action);
7938
7939 switch (p->auto_connect) {
7940 case HCI_AUTO_CONN_DIRECT:
7941 case HCI_AUTO_CONN_ALWAYS:
7942 list_add(&p->action, &hdev->pend_le_conns);
7943 break;
7944 case HCI_AUTO_CONN_REPORT:
7945 list_add(&p->action, &hdev->pend_le_reports);
7946 break;
7947 default:
7948 break;
7949 }
7950 }
7951 }
7952
/* Called once a power-on attempt has finished.
 *
 * On success, re-arms the LE auto-connect actions and background scan,
 * then completes any pending Set Powered commands and emits a
 * New Settings event.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	/* Complete all pending Set Powered commands; settings_rsp stores
	 * one requesting socket in match.sk (with a reference held).
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
7975
/* Notify the management interface that the controller has powered off.
 *
 * Completes pending Set Powered commands, fails all other pending
 * commands with an appropriate status, resets the published class of
 * device and emits a New Settings event.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Fail every remaining pending command (opcode 0 matches all) */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Publish an all-zero class of device if one was previously set */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8009
mgmt_set_powered_failed(struct hci_dev * hdev,int err)8010 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8011 {
8012 struct mgmt_pending_cmd *cmd;
8013 u8 status;
8014
8015 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8016 if (!cmd)
8017 return;
8018
8019 if (err == -ERFKILL)
8020 status = MGMT_STATUS_RFKILLED;
8021 else
8022 status = MGMT_STATUS_FAILED;
8023
8024 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8025
8026 mgmt_pending_remove(cmd);
8027 }
8028
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * The store_hint tells userspace whether it should persist the key.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	ev.key.addr.type = BDADDR_BREDR;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.type = key->type;
	ev.key.pin_len = key->pin_len;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
8045
mgmt_ltk_type(struct smp_ltk * ltk)8046 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8047 {
8048 switch (ltk->type) {
8049 case SMP_LTK:
8050 case SMP_LTK_SLAVE:
8051 if (ltk->authenticated)
8052 return MGMT_LTK_AUTHENTICATED;
8053 return MGMT_LTK_UNAUTHENTICATED;
8054 case SMP_LTK_P256:
8055 if (ltk->authenticated)
8056 return MGMT_LTK_P256_AUTH;
8057 return MGMT_LTK_P256_UNAUTH;
8058 case SMP_LTK_P256_DEBUG:
8059 return MGMT_LTK_P256_DEBUG;
8060 }
8061
8062 return MGMT_LTK_UNAUTHENTICATED;
8063 }
8064
/* Emit a New Long Term Key event for a freshly distributed LTK. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Only the master (initiator-distributed) key type sets this flag */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
8107
/* Emit a New IRK event for a freshly distributed identity resolving key. */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	/* Both the current RPA and the identity address are reported so
	 * userspace can correlate the two.
	 */
	bacpy(&ev.rpa, &irk->rpa);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
8123
/* Emit a New CSRK event for a freshly distributed signature resolving key. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;
	bool identity_addr;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	identity_addr = !(csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
			  (csrk->bdaddr.b[5] & 0xc0) != 0xc0);
	ev.store_hint = identity_addr ? persistent : 0x00;

	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
8153
/* Emit a New Connection Parameter event so userspace can decide whether
 * to persist remotely requested parameters. Only identity addresses are
 * reported since others cannot be correlated across reconnections.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.store_hint = store_hint;
	ev.timeout = cpu_to_le16(timeout);
	ev.latency = cpu_to_le16(latency);
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
8174
/* Emit a Device Connected event including any available EIR data.
 *
 * The event is assembled in a stack buffer; the variable-length EIR
 * part carries either the LE advertising data or, for BR/EDR, the
 * remote name and class of device. NOTE(review): buf is 512 bytes —
 * presumably enough for the fixed header plus the HCI-bounded EIR
 * fields; confirm against the maximum le_adv_data_len and name_len
 * the callers can pass.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device if one is actually set */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
8211
disconnect_rsp(struct mgmt_pending_cmd * cmd,void * data)8212 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8213 {
8214 struct sock **sk = data;
8215
8216 cmd->cmd_complete(cmd, 0);
8217
8218 *sk = cmd->sk;
8219 sock_hold(*sk);
8220
8221 mgmt_pending_remove(cmd);
8222 }
8223
/* Complete one pending Unpair Device command: first emit the
 * Device Unpaired event, then finish and drop the pending command.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
8234
mgmt_powering_down(struct hci_dev * hdev)8235 bool mgmt_powering_down(struct hci_dev *hdev)
8236 {
8237 struct mgmt_pending_cmd *cmd;
8238 struct mgmt_mode *cp;
8239
8240 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8241 if (!cmd)
8242 return false;
8243
8244 cp = cmd->param;
8245 if (!cp->val)
8246 return true;
8247
8248 return false;
8249 }
8250
/* Emit a Device Disconnected event and complete any related pending
 * Disconnect and Unpair Device commands.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections that were announced to userspace */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the requesting socket in sk (with a
	 * reference held) so it is skipped when broadcasting the event.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
8290
/* A disconnect attempt ended in failure: complete pending Unpair Device
 * commands and, if a pending Disconnect command matches the address,
 * finish it with the mapped HCI status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command if it refers to this very device */
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8316
/* Emit a Connect Failed event for an outgoing connection attempt that
 * could not be established.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
8336
/* Emit a PIN Code Request event; secure indicates that a 16-digit PIN
 * is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	ev.addr.type = BDADDR_BREDR;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
8347
/* Finish a pending PIN Code Reply command with the mapped HCI status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
}
8360
/* Finish a pending PIN Code Negative Reply command with the mapped
 * HCI status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
}
8373
/* Emit a User Confirmation Request event asking userspace to confirm
 * the displayed numeric value.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.value = cpu_to_le32(value);
	ev.confirm_hint = confirm_hint;

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
8390
/* Emit a User Passkey Request event asking userspace to enter the
 * passkey displayed on the remote device.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
8404
/* Common completion helper for the user confirm/passkey (negative)
 * reply commands. Returns -ENOENT when no matching command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
8420
/* Finish a pending User Confirmation Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
8427
/* Finish a pending User Confirmation Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
8435
/* Finish a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
8442
/* Finish a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
8450
/* Emit a Passkey Notify event with the passkey to display and whether
 * a digit has already been entered on the remote side.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.entered = entered;
	ev.passkey = __cpu_to_le32(passkey);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
8466
/* Emit an Authentication Failed event for a connection and complete a
 * matching pending pairing command, if any. The event is suppressed for
 * the socket that initiated the pairing since it gets the command
 * response instead.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
8487
/* Called when the controller finished toggling link-level
 * authentication. Completes pending Set Link Security commands and, if
 * the setting actually changed, emits a New Settings event.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		/* On failure just reject all pending commands */
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag and
	 * note whether that actually changed anything.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8514
clear_eir(struct hci_request * req)8515 static void clear_eir(struct hci_request *req)
8516 {
8517 struct hci_dev *hdev = req->hdev;
8518 struct hci_cp_write_eir cp;
8519
8520 if (!lmp_ext_inq_capable(hdev))
8521 return;
8522
8523 memset(hdev->eir, 0, sizeof(hdev->eir));
8524
8525 memset(&cp, 0, sizeof(cp));
8526
8527 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8528 }
8529
/* Completion handler for enabling/disabling Simple Secure Pairing.
 *
 * On HCI failure: if this was an enable attempt, roll back the
 * HCI_SSP_ENABLED flag (and HS, which depends on SSP) and broadcast New
 * Settings; then fail all pending SET_SSP commands with the error.
 *
 * On success: sync HCI_SSP_ENABLED with the requested state, clearing
 * HCI_HS_ENABLED whenever SSP is turned off (HS requires SSP), respond
 * to pending SET_SSP commands, broadcast New Settings if anything
 * changed, and finally update the EIR data — SSP debug mode is
 * re-applied when enabling with HCI_USE_DEBUG_KEYS set, and EIR is
 * cleared when SSP ends up disabled.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enable failed: undo the optimistic flag set and tell
		 * listeners the settings reverted.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		/* Disabling SSP also disables HS; report "changed" if
		 * either flag actually flipped.
		 */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Bring controller EIR state in line with the new SSP setting */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8582
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)8583 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8584 {
8585 struct cmd_lookup *match = data;
8586
8587 if (match->sk == NULL) {
8588 match->sk = cmd->sk;
8589 sock_hold(match->sk);
8590 }
8591 }
8592
/* Completion handler for a Class of Device update. Finds the socket of
 * whichever pending command (SET_DEV_CLASS / ADD_UUID / REMOVE_UUID)
 * triggered the write, and on success broadcasts the new class to all
 * other sockets plus an Extended Info Changed event.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	static const u16 ops[] = {
		MGMT_OP_SET_DEV_CLASS,
		MGMT_OP_ADD_UUID,
		MGMT_OP_REMOVE_UUID,
	};
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
	int i;

	for (i = 0; i < ARRAY_SIZE(ops); i++)
		mgmt_pending_foreach(ops[i], hdev, sk_lookup, &match);

	if (!status) {
		/* Class of Device is always 3 bytes */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
8611
/* Completion handler for a local-name write. Broadcasts a Local Name
 * Changed event (and Extended Info Changed) unless the write was part
 * of powering the adapter on, in which case nothing is sent.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	/* name is expected to be HCI_MAX_NAME_LENGTH bytes — supplied by
	 * the HCI layer, not validated here.
	 */
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No user-space request pending: the write originated in
		 * the kernel, so update the cached name directly.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the requesting socket — it gets a command response instead */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
8639
/* Return true if the 128-bit UUID appears in the given UUID list. */
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	u16 idx;

	for (idx = 0; idx < uuid_count; idx++)
		if (memcmp(uuid, uuids[idx], 16) == 0)
			return true;

	return false;
}
8651
/* Walk an EIR/advertising data blob and return true if any advertised
 * service UUID (16, 32 or 128 bit) matches an entry in the uuids list.
 * 16- and 32-bit UUIDs are expanded against the Bluetooth base UUID
 * before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	/* Each EIR field is: length byte (covers type + data, not the
	 * length byte itself), type byte, then field_len - 1 data bytes.
	 */
	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* Zero length terminates the data */
		if (field_len == 0)
			break;

		/* Truncated field: stop rather than read past the blob */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Data starts at eir[2]; i + 3 <= field_len keeps
			 * both bytes of each little-endian UUID16 in range.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* Insert UUID16 into bytes 12-13 of the
				 * big-endian base UUID.
				 */
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* Insert little-endian UUID32 into bytes
				 * 12-15 of the base UUID.
				 */
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* UUID128s are carried verbatim (little-endian on
			 * the wire, matching the stored list format).
			 */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past length byte + field contents */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8706
/* Schedule a delayed LE scan restart so controllers with strict
 * duplicate filtering report fresh RSSI values for known devices.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Don't bother restarting if the current scan window would end
	 * before the delayed restart could even fire.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
8721
is_filter_match(struct hci_dev * hdev,s8 rssi,u8 * eir,u16 eir_len,u8 * scan_rsp,u8 scan_rsp_len)8722 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
8723 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8724 {
8725 /* If a RSSI threshold has been specified, and
8726 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8727 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8728 * is set, let it through for further processing, as we might need to
8729 * restart the scan.
8730 *
8731 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8732 * the results are also dropped.
8733 */
8734 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8735 (rssi == HCI_RSSI_INVALID ||
8736 (rssi < hdev->discovery.rssi &&
8737 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
8738 return false;
8739
8740 if (hdev->discovery.uuid_count != 0) {
8741 /* If a list of UUIDs is provided in filter, results with no
8742 * matching UUID should be dropped.
8743 */
8744 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
8745 hdev->discovery.uuids) &&
8746 !eir_has_uuids(scan_rsp, scan_rsp_len,
8747 hdev->discovery.uuid_count,
8748 hdev->discovery.uuids))
8749 return false;
8750 }
8751
8752 /* If duplicate filtering does not report RSSI changes, then restart
8753 * scanning to ensure updated result with updated RSSI values.
8754 */
8755 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
8756 restart_le_scan(hdev);
8757
8758 /* Validate RSSI value against the RSSI threshold once more. */
8759 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8760 rssi < hdev->discovery.rssi)
8761 return false;
8762 }
8763
8764 return true;
8765 }
8766
/* Report a discovered device to user space as a Device Found event,
 * applying discovery filters (active discovery check, service filter,
 * limited-discoverable filter) and assembling EIR + optional Class of
 * Device + scan response data into a single event buffer.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* 512 bytes comfortably covers the event header plus EIR, scan
	 * response and appended CoD; oversized results are dropped below.
	 */
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* BR/EDR: bit 5 of the minor/flags octet of CoD */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: LE Limited Discoverable flag in AD Flags */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the Class of Device (3 bytes + 2 header bytes) only when
	 * the EIR data doesn't already carry one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8851
/* Report a resolved remote device name to user space. The name is
 * packaged as an EIR Complete Name field inside a Device Found event.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	/* Event header + name field (2 header bytes + name) */
	char buf[sizeof(struct mgmt_ev_device_found) +
		 HCI_MAX_NAME_LENGTH + 2] = {};
	struct mgmt_ev_device_found *ev = (void *)buf;
	u16 eir_len;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);
	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
8874
/* Broadcast a Discovering event announcing that discovery of the given
 * type has started or stopped.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	bt_dev_dbg(hdev, "discovering %u", discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
8887
/* Broadcast a Controller Suspend event with the given suspend state. */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
8895
/* Broadcast a Controller Resume event. When the wake was triggered by a
 * remote device its address is included; otherwise the address field is
 * zeroed.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev = {
		.wake_reason = reason,
	};

	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
8911
/* Registration descriptor for the mgmt control channel: routes
 * HCI_CHANNEL_CONTROL sockets to the mgmt_handlers command table and
 * runs mgmt_init_hdev when a controller is first accessed.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8918
/* Register the mgmt control channel with the HCI socket layer.
 * Returns 0 on success or a negative errno from the registration.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8923
/* Unregister the mgmt control channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8928