1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2014 Intel Corporation
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License version 2 as
8    published by the Free Software Foundation;
9 
10    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 
19    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21    SOFTWARE IS DISCLAIMED.
22 */
23 
24 #include <linux/sched/signal.h>
25 
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29 
30 #include "smp.h"
31 #include "hci_request.h"
32 
33 #define HCI_REQ_DONE	  0
34 #define HCI_REQ_PEND	  1
35 #define HCI_REQ_CANCELED  2
36 
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38 {
39 	skb_queue_head_init(&req->cmd_q);
40 	req->hdev = hdev;
41 	req->err = 0;
42 }
43 
44 void hci_req_purge(struct hci_request *req)
45 {
46 	skb_queue_purge(&req->cmd_q);
47 }
48 
49 static int req_run(struct hci_request *req, hci_req_complete_t complete,
50 		   hci_req_complete_skb_t complete_skb)
51 {
52 	struct hci_dev *hdev = req->hdev;
53 	struct sk_buff *skb;
54 	unsigned long flags;
55 
56 	BT_DBG("length %u", skb_queue_len(&req->cmd_q));
57 
58 	/* If an error occurred during request building, remove all HCI
59 	 * commands queued on the HCI request queue.
60 	 */
61 	if (req->err) {
62 		skb_queue_purge(&req->cmd_q);
63 		return req->err;
64 	}
65 
66 	/* Do not allow empty requests */
67 	if (skb_queue_empty(&req->cmd_q))
68 		return -ENODATA;
69 
70 	skb = skb_peek_tail(&req->cmd_q);
71 	if (complete) {
72 		bt_cb(skb)->hci.req_complete = complete;
73 	} else if (complete_skb) {
74 		bt_cb(skb)->hci.req_complete_skb = complete_skb;
75 		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
76 	}
77 
78 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
79 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
80 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
81 
82 	queue_work(hdev->workqueue, &hdev->cmd_work);
83 
84 	return 0;
85 }
86 
87 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
88 {
89 	return req_run(req, complete, NULL);
90 }
91 
92 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
93 {
94 	return req_run(req, NULL, complete);
95 }
96 
97 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
98 				  struct sk_buff *skb)
99 {
100 	BT_DBG("%s result 0x%2.2x", hdev->name, result);
101 
102 	if (hdev->req_status == HCI_REQ_PEND) {
103 		hdev->req_result = result;
104 		hdev->req_status = HCI_REQ_DONE;
105 		if (skb)
106 			hdev->req_skb = skb_get(skb);
107 		wake_up_interruptible(&hdev->req_wait_q);
108 	}
109 }
110 
111 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
112 {
113 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
114 
115 	if (hdev->req_status == HCI_REQ_PEND) {
116 		hdev->req_result = err;
117 		hdev->req_status = HCI_REQ_CANCELED;
118 		wake_up_interruptible(&hdev->req_wait_q);
119 	}
120 }
121 
122 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
123 				  const void *param, u8 event, u32 timeout)
124 {
125 	struct hci_request req;
126 	struct sk_buff *skb;
127 	int err = 0;
128 
129 	BT_DBG("%s", hdev->name);
130 
131 	hci_req_init(&req, hdev);
132 
133 	hci_req_add_ev(&req, opcode, plen, param, event);
134 
135 	hdev->req_status = HCI_REQ_PEND;
136 
137 	err = hci_req_run_skb(&req, hci_req_sync_complete);
138 	if (err < 0)
139 		return ERR_PTR(err);
140 
141 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
142 			hdev->req_status != HCI_REQ_PEND, timeout);
143 
144 	if (err == -ERESTARTSYS)
145 		return ERR_PTR(-EINTR);
146 
147 	switch (hdev->req_status) {
148 	case HCI_REQ_DONE:
149 		err = -bt_to_errno(hdev->req_result);
150 		break;
151 
152 	case HCI_REQ_CANCELED:
153 		err = -hdev->req_result;
154 		break;
155 
156 	default:
157 		err = -ETIMEDOUT;
158 		break;
159 	}
160 
161 	hdev->req_status = hdev->req_result = 0;
162 	skb = hdev->req_skb;
163 	hdev->req_skb = NULL;
164 
165 	BT_DBG("%s end: err %d", hdev->name, err);
166 
167 	if (err < 0) {
168 		kfree_skb(skb);
169 		return ERR_PTR(err);
170 	}
171 
172 	if (!skb)
173 		return ERR_PTR(-ENODATA);
174 
175 	return skb;
176 }
177 EXPORT_SYMBOL(__hci_cmd_sync_ev);
178 
179 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
180 			       const void *param, u32 timeout)
181 {
182 	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
183 }
184 EXPORT_SYMBOL(__hci_cmd_sync);
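
/* Illustrative sketch (not part of the original source): a caller would
 * typically use __hci_cmd_sync() to send a single command and block until
 * its completion event arrives, e.g.:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	(skb->data now holds the command's return parameters)
 *	kfree_skb(skb);
 *
 * The opcode shown is only an example; HCI_CMD_TIMEOUT is assumed to be the
 * stack's standard command timeout.
 */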
185 
186 /* Execute request and wait for completion. */
187 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
188 						     unsigned long opt),
189 		   unsigned long opt, u32 timeout, u8 *hci_status)
190 {
191 	struct hci_request req;
192 	int err = 0;
193 
194 	BT_DBG("%s start", hdev->name);
195 
196 	hci_req_init(&req, hdev);
197 
198 	hdev->req_status = HCI_REQ_PEND;
199 
200 	err = func(&req, opt);
201 	if (err) {
202 		if (hci_status)
203 			*hci_status = HCI_ERROR_UNSPECIFIED;
204 		return err;
205 	}
206 
207 	err = hci_req_run_skb(&req, hci_req_sync_complete);
208 	if (err < 0) {
209 		hdev->req_status = 0;
210 
211 		/* ENODATA means the HCI request command queue is empty.
212 		 * This can happen when a request with conditionals doesn't
213 		 * trigger any commands to be sent. This is normal behavior
214 		 * and should not trigger an error return.
215 		 */
216 		if (err == -ENODATA) {
217 			if (hci_status)
218 				*hci_status = 0;
219 			return 0;
220 		}
221 
222 		if (hci_status)
223 			*hci_status = HCI_ERROR_UNSPECIFIED;
224 
225 		return err;
226 	}
227 
228 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
229 			hdev->req_status != HCI_REQ_PEND, timeout);
230 
231 	if (err == -ERESTARTSYS)
232 		return -EINTR;
233 
234 	switch (hdev->req_status) {
235 	case HCI_REQ_DONE:
236 		err = -bt_to_errno(hdev->req_result);
237 		if (hci_status)
238 			*hci_status = hdev->req_result;
239 		break;
240 
241 	case HCI_REQ_CANCELED:
242 		err = -hdev->req_result;
243 		if (hci_status)
244 			*hci_status = HCI_ERROR_UNSPECIFIED;
245 		break;
246 
247 	default:
248 		err = -ETIMEDOUT;
249 		if (hci_status)
250 			*hci_status = HCI_ERROR_UNSPECIFIED;
251 		break;
252 	}
253 
254 	kfree_skb(hdev->req_skb);
255 	hdev->req_skb = NULL;
256 	hdev->req_status = hdev->req_result = 0;
257 
258 	BT_DBG("%s end: err %d", hdev->name, err);
259 
260 	return err;
261 }
262 
263 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
264 						  unsigned long opt),
265 		 unsigned long opt, u32 timeout, u8 *hci_status)
266 {
267 	int ret;
268 
269 	if (!test_bit(HCI_UP, &hdev->flags))
270 		return -ENETDOWN;
271 
272 	/* Serialize all requests */
273 	hci_req_sync_lock(hdev);
274 	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
275 	hci_req_sync_unlock(hdev);
276 
277 	return ret;
278 }
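
/* Illustrative sketch (assumption, not from the original source): the
 * callback passed to hci_req_sync() only queues commands; running the
 * request and waiting for it is done by __hci_req_sync(). A hypothetical
 * caller could look like this:
 *
 *	static int update_name_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__hci_req_update_name(req);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	int err = hci_req_sync(hdev, update_name_req, 0, HCI_CMD_TIMEOUT,
 *			       &status);
 */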
279 
280 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
281 				const void *param)
282 {
283 	int len = HCI_COMMAND_HDR_SIZE + plen;
284 	struct hci_command_hdr *hdr;
285 	struct sk_buff *skb;
286 
287 	skb = bt_skb_alloc(len, GFP_ATOMIC);
288 	if (!skb)
289 		return NULL;
290 
291 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
292 	hdr->opcode = cpu_to_le16(opcode);
293 	hdr->plen   = plen;
294 
295 	if (plen)
296 		skb_put_data(skb, param, plen);
297 
298 	BT_DBG("skb len %d", skb->len);
299 
300 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
301 	hci_skb_opcode(skb) = opcode;
302 
303 	return skb;
304 }
305 
306 /* Queue a command to an asynchronous HCI request */
307 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
308 		    const void *param, u8 event)
309 {
310 	struct hci_dev *hdev = req->hdev;
311 	struct sk_buff *skb;
312 
313 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
314 
315 	/* If an error occurred during request building, there is no point in
316 	 * queueing the HCI command. We can simply return.
317 	 */
318 	if (req->err)
319 		return;
320 
321 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
322 	if (!skb) {
323 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
324 			   opcode);
325 		req->err = -ENOMEM;
326 		return;
327 	}
328 
329 	if (skb_queue_empty(&req->cmd_q))
330 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
331 
332 	bt_cb(skb)->hci.req_event = event;
333 
334 	skb_queue_tail(&req->cmd_q, skb);
335 }
336 
337 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
338 		 const void *param)
339 {
340 	hci_req_add_ev(req, opcode, plen, param, 0);
341 }
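
/* Illustrative sketch (assumption): the usual asynchronous pattern is to
 * build a request on the stack, queue one or more commands and then run it
 * with an optional completion callback:
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	hci_req_run(&req, NULL);	(a completion callback may be passed)
 */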
342 
343 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
344 {
345 	struct hci_dev *hdev = req->hdev;
346 	struct hci_cp_write_page_scan_activity acp;
347 	u8 type;
348 
349 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
350 		return;
351 
352 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
353 		return;
354 
355 	if (enable) {
356 		type = PAGE_SCAN_TYPE_INTERLACED;
357 
358 		/* 160 msec page scan interval */
359 		acp.interval = cpu_to_le16(0x0100);
360 	} else {
361 		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
362 
363 		/* default 1.28 sec page scan */
364 		acp.interval = cpu_to_le16(0x0800);
365 	}
366 
367 	acp.window = cpu_to_le16(0x0012);
368 
369 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
370 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
371 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
372 			    sizeof(acp), &acp);
373 
374 	if (hdev->page_scan_type != type)
375 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
376 }
377 
378 /* This function controls the background scanning based on hdev->pend_le_conns
379  * list. If there are pending LE connections we start background scanning,
380  * otherwise we stop it.
381  *
382  * This function requires that the caller holds hdev->lock.
383  */
384 static void __hci_update_background_scan(struct hci_request *req)
385 {
386 	struct hci_dev *hdev = req->hdev;
387 
388 	if (!test_bit(HCI_UP, &hdev->flags) ||
389 	    test_bit(HCI_INIT, &hdev->flags) ||
390 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
391 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
392 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
393 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
394 		return;
395 
396 	/* No point in doing scanning if LE support hasn't been enabled */
397 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
398 		return;
399 
400 	/* If discovery is active don't interfere with it */
401 	if (hdev->discovery.state != DISCOVERY_STOPPED)
402 		return;
403 
404 	/* Reset RSSI and UUID filters when starting background scanning
405 	 * since these filters are meant for service discovery only.
406 	 *
407 	 * The Start Discovery and Start Service Discovery operations
408 	 * ensure to set proper values for RSSI threshold and UUID
409 	 * filter list. So it is safe to just reset them here.
410 	 */
411 	hci_discovery_filter_clear(hdev);
412 
413 	if (list_empty(&hdev->pend_le_conns) &&
414 	    list_empty(&hdev->pend_le_reports)) {
415 		/* If there are no pending LE connections or devices
416 		 * to be scanned for, we should stop the background
417 		 * scanning.
418 		 */
419 
420 		/* If controller is not scanning we are done. */
421 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
422 			return;
423 
424 		hci_req_add_le_scan_disable(req);
425 
426 		BT_DBG("%s stopping background scanning", hdev->name);
427 	} else {
428 		/* If there is at least one pending LE connection, we should
429 		 * keep the background scan running.
430 		 */
431 
432 		/* If controller is connecting, we should not start scanning
433 		 * since some controllers are not able to scan and connect at
434 		 * the same time.
435 		 */
436 		if (hci_lookup_le_connect(hdev))
437 			return;
438 
439 		/* If controller is currently scanning, we stop it to ensure we
440 		 * don't miss any advertising (due to duplicates filter).
441 		 */
442 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
443 			hci_req_add_le_scan_disable(req);
444 
445 		hci_req_add_le_passive_scan(req);
446 
447 		BT_DBG("%s starting background scanning", hdev->name);
448 	}
449 }
450 
451 void __hci_req_update_name(struct hci_request *req)
452 {
453 	struct hci_dev *hdev = req->hdev;
454 	struct hci_cp_write_local_name cp;
455 
456 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
457 
458 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
459 }
460 
461 #define PNP_INFO_SVCLASS_ID		0x1200
462 
463 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
464 {
465 	u8 *ptr = data, *uuids_start = NULL;
466 	struct bt_uuid *uuid;
467 
468 	if (len < 4)
469 		return ptr;
470 
471 	list_for_each_entry(uuid, &hdev->uuids, list) {
472 		u16 uuid16;
473 
474 		if (uuid->size != 16)
475 			continue;
476 
477 		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
478 		if (uuid16 < 0x1100)
479 			continue;
480 
481 		if (uuid16 == PNP_INFO_SVCLASS_ID)
482 			continue;
483 
484 		if (!uuids_start) {
485 			uuids_start = ptr;
486 			uuids_start[0] = 1;
487 			uuids_start[1] = EIR_UUID16_ALL;
488 			ptr += 2;
489 		}
490 
491 		/* Stop if not enough space to put next UUID */
492 		if ((ptr - data) + sizeof(u16) > len) {
493 			uuids_start[1] = EIR_UUID16_SOME;
494 			break;
495 		}
496 
497 		*ptr++ = (uuid16 & 0x00ff);
498 		*ptr++ = (uuid16 & 0xff00) >> 8;
499 		uuids_start[0] += sizeof(uuid16);
500 	}
501 
502 	return ptr;
503 }
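
/* Illustrative note (assumption): the block above emits a standard AD
 * structure of the form { length, type, data }. For example, the two 16-bit
 * UUIDs 0x1101 and 0x111e would be encoded little endian as:
 *
 *	05 03 01 11 1e 11
 *
 * where 0x05 is the length (1 type byte + 2 * sizeof(u16)) and 0x03 is
 * EIR_UUID16_ALL.
 */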
504 
505 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
506 {
507 	u8 *ptr = data, *uuids_start = NULL;
508 	struct bt_uuid *uuid;
509 
510 	if (len < 6)
511 		return ptr;
512 
513 	list_for_each_entry(uuid, &hdev->uuids, list) {
514 		if (uuid->size != 32)
515 			continue;
516 
517 		if (!uuids_start) {
518 			uuids_start = ptr;
519 			uuids_start[0] = 1;
520 			uuids_start[1] = EIR_UUID32_ALL;
521 			ptr += 2;
522 		}
523 
524 		/* Stop if not enough space to put next UUID */
525 		if ((ptr - data) + sizeof(u32) > len) {
526 			uuids_start[1] = EIR_UUID32_SOME;
527 			break;
528 		}
529 
530 		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
531 		ptr += sizeof(u32);
532 		uuids_start[0] += sizeof(u32);
533 	}
534 
535 	return ptr;
536 }
537 
538 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
539 {
540 	u8 *ptr = data, *uuids_start = NULL;
541 	struct bt_uuid *uuid;
542 
543 	if (len < 18)
544 		return ptr;
545 
546 	list_for_each_entry(uuid, &hdev->uuids, list) {
547 		if (uuid->size != 128)
548 			continue;
549 
550 		if (!uuids_start) {
551 			uuids_start = ptr;
552 			uuids_start[0] = 1;
553 			uuids_start[1] = EIR_UUID128_ALL;
554 			ptr += 2;
555 		}
556 
557 		/* Stop if not enough space to put next UUID */
558 		if ((ptr - data) + 16 > len) {
559 			uuids_start[1] = EIR_UUID128_SOME;
560 			break;
561 		}
562 
563 		memcpy(ptr, uuid->uuid, 16);
564 		ptr += 16;
565 		uuids_start[0] += 16;
566 	}
567 
568 	return ptr;
569 }
570 
571 static void create_eir(struct hci_dev *hdev, u8 *data)
572 {
573 	u8 *ptr = data;
574 	size_t name_len;
575 
576 	name_len = strlen(hdev->dev_name);
577 
578 	if (name_len > 0) {
579 		/* EIR Data type */
580 		if (name_len > 48) {
581 			name_len = 48;
582 			ptr[1] = EIR_NAME_SHORT;
583 		} else
584 			ptr[1] = EIR_NAME_COMPLETE;
585 
586 		/* EIR Data length */
587 		ptr[0] = name_len + 1;
588 
589 		memcpy(ptr + 2, hdev->dev_name, name_len);
590 
591 		ptr += (name_len + 2);
592 	}
593 
594 	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
595 		ptr[0] = 2;
596 		ptr[1] = EIR_TX_POWER;
597 		ptr[2] = (u8) hdev->inq_tx_power;
598 
599 		ptr += 3;
600 	}
601 
602 	if (hdev->devid_source > 0) {
603 		ptr[0] = 9;
604 		ptr[1] = EIR_DEVICE_ID;
605 
606 		put_unaligned_le16(hdev->devid_source, ptr + 2);
607 		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
608 		put_unaligned_le16(hdev->devid_product, ptr + 6);
609 		put_unaligned_le16(hdev->devid_version, ptr + 8);
610 
611 		ptr += 10;
612 	}
613 
614 	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
615 	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
616 	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
617 }
618 
619 void __hci_req_update_eir(struct hci_request *req)
620 {
621 	struct hci_dev *hdev = req->hdev;
622 	struct hci_cp_write_eir cp;
623 
624 	if (!hdev_is_powered(hdev))
625 		return;
626 
627 	if (!lmp_ext_inq_capable(hdev))
628 		return;
629 
630 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
631 		return;
632 
633 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
634 		return;
635 
636 	memset(&cp, 0, sizeof(cp));
637 
638 	create_eir(hdev, cp.data);
639 
640 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
641 		return;
642 
643 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
644 
645 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
646 }
647 
648 void hci_req_add_le_scan_disable(struct hci_request *req)
649 {
650 	struct hci_dev *hdev = req->hdev;
651 
652 	if (use_ext_scan(hdev)) {
653 		struct hci_cp_le_set_ext_scan_enable cp;
654 
655 		memset(&cp, 0, sizeof(cp));
656 		cp.enable = LE_SCAN_DISABLE;
657 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
658 			    &cp);
659 	} else {
660 		struct hci_cp_le_set_scan_enable cp;
661 
662 		memset(&cp, 0, sizeof(cp));
663 		cp.enable = LE_SCAN_DISABLE;
664 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
665 	}
666 }
667 
668 static void add_to_white_list(struct hci_request *req,
669 			      struct hci_conn_params *params)
670 {
671 	struct hci_cp_le_add_to_white_list cp;
672 
673 	cp.bdaddr_type = params->addr_type;
674 	bacpy(&cp.bdaddr, &params->addr);
675 
676 	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
677 }
678 
679 static u8 update_white_list(struct hci_request *req)
680 {
681 	struct hci_dev *hdev = req->hdev;
682 	struct hci_conn_params *params;
683 	struct bdaddr_list *b;
684 	uint8_t white_list_entries = 0;
685 
686 	/* Go through the current white list programmed into the
687 	 * controller one by one and check if that address is still
688 	 * in the list of pending connections or list of devices to
689 	 * report. If not present in either list, then queue the
690 	 * command to remove it from the controller.
691 	 */
692 	list_for_each_entry(b, &hdev->le_white_list, list) {
693 		/* If the device is neither in pend_le_conns nor
694 		 * pend_le_reports then remove it from the whitelist.
695 		 */
696 		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
697 					       &b->bdaddr, b->bdaddr_type) &&
698 		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
699 					       &b->bdaddr, b->bdaddr_type)) {
700 			struct hci_cp_le_del_from_white_list cp;
701 
702 			cp.bdaddr_type = b->bdaddr_type;
703 			bacpy(&cp.bdaddr, &b->bdaddr);
704 
705 			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
706 				    sizeof(cp), &cp);
707 			continue;
708 		}
709 
710 		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
711 			/* White list can not be used with RPAs */
712 			return 0x00;
713 		}
714 
715 		white_list_entries++;
716 	}
717 
718 	/* Since all white list entries that are no longer valid have been
719 	 * removed, walk through the list of pending connections
720 	 * and ensure that any new device gets programmed into
721 	 * the controller.
722 	 *
723 	 * If the list of devices is larger than the number of
724 	 * available white list entries in the controller, then
725 	 * just abort and return the filter policy value to not use the
726 	 * white list.
727 	 */
728 	list_for_each_entry(params, &hdev->pend_le_conns, action) {
729 		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
730 					   &params->addr, params->addr_type))
731 			continue;
732 
733 		if (white_list_entries >= hdev->le_white_list_size) {
734 			/* Select filter policy to accept all advertising */
735 			return 0x00;
736 		}
737 
738 		if (hci_find_irk_by_addr(hdev, &params->addr,
739 					 params->addr_type)) {
740 			/* White list can not be used with RPAs */
741 			return 0x00;
742 		}
743 
744 		white_list_entries++;
745 		add_to_white_list(req, params);
746 	}
747 
748 	/* After adding all new pending connections, walk through
749 	 * the list of pending reports and also add these to the
750 	 * white list if there is still space.
751 	 */
752 	list_for_each_entry(params, &hdev->pend_le_reports, action) {
753 		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
754 					   &params->addr, params->addr_type))
755 			continue;
756 
757 		if (white_list_entries >= hdev->le_white_list_size) {
758 			/* Select filter policy to accept all advertising */
759 			return 0x00;
760 		}
761 
762 		if (hci_find_irk_by_addr(hdev, &params->addr,
763 					 params->addr_type)) {
764 			/* White list can not be used with RPAs */
765 			return 0x00;
766 		}
767 
768 		white_list_entries++;
769 		add_to_white_list(req, params);
770 	}
771 
772 	/* Select filter policy to use white list */
773 	return 0x01;
774 }
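
/* Illustrative note (assumption): the value returned above is used directly
 * as the LE scan filter_policy parameter, where 0x00 accepts all advertising
 * packets and 0x01 accepts only packets from white listed devices. The
 * caller in hci_req_add_le_passive_scan() may additionally OR in 0x02 when
 * the controller supports Extended Scanner Filter Policies.
 */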
775 
776 static bool scan_use_rpa(struct hci_dev *hdev)
777 {
778 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
779 }
780 
781 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
782 			       u16 window, u8 own_addr_type, u8 filter_policy)
783 {
784 	struct hci_dev *hdev = req->hdev;
785 
786 	/* Use extended scanning if the Set Extended Scan Parameters and Set
787 	 * Extended Scan Enable commands are supported
788 	 */
789 	if (use_ext_scan(hdev)) {
790 		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
791 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
792 		struct hci_cp_le_scan_phy_params *phy_params;
793 		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
794 		u32 plen;
795 
796 		ext_param_cp = (void *)data;
797 		phy_params = (void *)ext_param_cp->data;
798 
799 		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
800 		ext_param_cp->own_addr_type = own_addr_type;
801 		ext_param_cp->filter_policy = filter_policy;
802 
803 		plen = sizeof(*ext_param_cp);
804 
805 		if (scan_1m(hdev) || scan_2m(hdev)) {
806 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
807 
808 			memset(phy_params, 0, sizeof(*phy_params));
809 			phy_params->type = type;
810 			phy_params->interval = cpu_to_le16(interval);
811 			phy_params->window = cpu_to_le16(window);
812 
813 			plen += sizeof(*phy_params);
814 			phy_params++;
815 		}
816 
817 		if (scan_coded(hdev)) {
818 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
819 
820 			memset(phy_params, 0, sizeof(*phy_params));
821 			phy_params->type = type;
822 			phy_params->interval = cpu_to_le16(interval);
823 			phy_params->window = cpu_to_le16(window);
824 
825 			plen += sizeof(*phy_params);
826 			phy_params++;
827 		}
828 
829 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
830 			    plen, ext_param_cp);
831 
832 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
833 		ext_enable_cp.enable = LE_SCAN_ENABLE;
834 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
835 
836 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
837 			    sizeof(ext_enable_cp), &ext_enable_cp);
838 	} else {
839 		struct hci_cp_le_set_scan_param param_cp;
840 		struct hci_cp_le_set_scan_enable enable_cp;
841 
842 		memset(&param_cp, 0, sizeof(param_cp));
843 		param_cp.type = type;
844 		param_cp.interval = cpu_to_le16(interval);
845 		param_cp.window = cpu_to_le16(window);
846 		param_cp.own_address_type = own_addr_type;
847 		param_cp.filter_policy = filter_policy;
848 		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
849 			    &param_cp);
850 
851 		memset(&enable_cp, 0, sizeof(enable_cp));
852 		enable_cp.enable = LE_SCAN_ENABLE;
853 		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
854 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
855 			    &enable_cp);
856 	}
857 }
858 
859 void hci_req_add_le_passive_scan(struct hci_request *req)
860 {
861 	struct hci_dev *hdev = req->hdev;
862 	u8 own_addr_type;
863 	u8 filter_policy;
864 
865 	/* Set require_privacy to false since no SCAN_REQ are sent
866 	 * during passive scanning. Not using a non-resolvable address
867 	 * here is important so that peer devices using direct
868 	 * advertising with our address will be correctly reported
869 	 * by the controller.
870 	 */
871 	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
872 				      &own_addr_type))
873 		return;
874 
875 	/* Adding or removing entries from the white list must
876 	 * happen before enabling scanning. The controller does
877 	 * not allow white list modification while scanning.
878 	 */
879 	filter_policy = update_white_list(req);
880 
881 	/* When the controller is using a resolvable random address and
882 	 * LE privacy is therefore enabled, controllers that support the
883 	 * Extended Scanner Filter Policies feature can also handle
884 	 * directed advertising.
885 	 *
886 	 * So instead of using filter policies 0x00 (no whitelist)
887 	 * and 0x01 (whitelist enabled) use the new filter policies
888 	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
889 	 */
890 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
891 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
892 		filter_policy |= 0x02;
893 
894 	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
895 			   hdev->le_scan_window, own_addr_type, filter_policy);
896 }
897 
898 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
899 {
900 	struct adv_info *adv_instance;
901 
902 	/* Ignore instance 0 */
903 	if (instance == 0x00)
904 		return 0;
905 
906 	adv_instance = hci_find_adv_instance(hdev, instance);
907 	if (!adv_instance)
908 		return 0;
909 
910 	/* TODO: Take into account the "appearance" and "local-name" flags here.
911 	 * These are currently being ignored as they are not supported.
912 	 */
913 	return adv_instance->scan_rsp_len;
914 }
915 
916 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
917 {
918 	u8 instance = hdev->cur_adv_instance;
919 	struct adv_info *adv_instance;
920 
921 	/* Ignore instance 0 */
922 	if (instance == 0x00)
923 		return 0;
924 
925 	adv_instance = hci_find_adv_instance(hdev, instance);
926 	if (!adv_instance)
927 		return 0;
928 
929 	/* TODO: Take into account the "appearance" and "local-name" flags here.
930 	 * These are currently being ignored as they are not supported.
931 	 */
932 	return adv_instance->scan_rsp_len;
933 }
934 
935 void __hci_req_disable_advertising(struct hci_request *req)
936 {
937 	if (ext_adv_capable(req->hdev)) {
938 		struct hci_cp_le_set_ext_adv_enable cp;
939 
940 		cp.enable = 0x00;
941 		/* Disable all sets since we only support one set at the moment */
942 		cp.num_of_sets = 0x00;
943 
944 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
945 	} else {
946 		u8 enable = 0x00;
947 
948 		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
949 	}
950 }
951 
952 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
953 {
954 	u32 flags;
955 	struct adv_info *adv_instance;
956 
957 	if (instance == 0x00) {
958 		/* Instance 0 always manages the "Tx Power" and "Flags"
959 		 * fields
960 		 */
961 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
962 
963 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
964 		 * corresponds to the "connectable" instance flag.
965 		 */
966 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
967 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
968 
969 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
970 			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
971 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
972 			flags |= MGMT_ADV_FLAG_DISCOV;
973 
974 		return flags;
975 	}
976 
977 	adv_instance = hci_find_adv_instance(hdev, instance);
978 
979 	/* Return 0 when we got an invalid instance identifier. */
980 	if (!adv_instance)
981 		return 0;
982 
983 	return adv_instance->flags;
984 }
985 
986 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
987 {
988 	/* If privacy is not enabled don't use RPA */
989 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
990 		return false;
991 
992 	/* If basic privacy mode is enabled use RPA */
993 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
994 		return true;
995 
996 	/* If limited privacy mode is enabled don't use RPA if we're
997 	 * both discoverable and bondable.
998 	 */
999 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1000 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
1001 		return false;
1002 
1003 	/* We're neither bondable nor discoverable in the limited
1004 	 * privacy mode, therefore use RPA.
1005 	 */
1006 	return true;
1007 }
1008 
1009 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1010 {
1011 	/* If there is no connection we are OK to advertise. */
1012 	if (hci_conn_num(hdev, LE_LINK) == 0)
1013 		return true;
1014 
1015 	/* Check le_states if there is any connection in slave role. */
1016 	if (hdev->conn_hash.le_num_slave > 0) {
1017 		/* Slave connection state and non connectable mode bit 20. */
1018 		if (!connectable && !(hdev->le_states[2] & 0x10))
1019 			return false;
1020 
1021 		/* Slave connection state and connectable mode bit 38
1022 		 * and scannable bit 21.
1023 		 */
1024 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
1025 				    !(hdev->le_states[2] & 0x20)))
1026 			return false;
1027 	}
1028 
1029 	/* Check le_states if there is any connection in master role. */
1030 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1031 		/* Master connection state and non connectable mode bit 18. */
1032 		if (!connectable && !(hdev->le_states[2] & 0x02))
1033 			return false;
1034 
1035 		/* Master connection state and connectable mode bit 35 and
1036 		 * scannable 19.
1037 		 */
1038 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
1039 				    !(hdev->le_states[2] & 0x08)))
1040 			return false;
1041 	}
1042 
1043 	return true;
1044 }
1045 
1046 void __hci_req_enable_advertising(struct hci_request *req)
1047 {
1048 	struct hci_dev *hdev = req->hdev;
1049 	struct hci_cp_le_set_adv_param cp;
1050 	u8 own_addr_type, enable = 0x01;
1051 	bool connectable;
1052 	u32 flags;
1053 
1054 	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1055 
1056 	/* If the "connectable" instance flag was not set, then choose between
1057 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1058 	 */
1059 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1060 		      mgmt_get_connectable(hdev);
1061 
1062 	if (!is_advertising_allowed(hdev, connectable))
1063 		return;
1064 
1065 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1066 		__hci_req_disable_advertising(req);
1067 
1068 	/* Clear the HCI_LE_ADV bit temporarily so that the
1069 	 * hci_update_random_address knows that it's safe to go ahead
1070 	 * and write a new random address. The flag will be set back on
1071 	 * as soon as the SET_ADV_ENABLE HCI command completes.
1072 	 */
1073 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1074 
1075 	/* Set require_privacy to true only when non-connectable
1076 	 * advertising is used. In that case it is fine to use a
1077 	 * non-resolvable private address.
1078 	 */
1079 	if (hci_update_random_address(req, !connectable,
1080 				      adv_use_rpa(hdev, flags),
1081 				      &own_addr_type) < 0)
1082 		return;
1083 
1084 	memset(&cp, 0, sizeof(cp));
1085 	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1086 	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1087 
1088 	if (connectable)
1089 		cp.type = LE_ADV_IND;
1090 	else if (get_cur_adv_instance_scan_rsp_len(hdev))
1091 		cp.type = LE_ADV_SCAN_IND;
1092 	else
1093 		cp.type = LE_ADV_NONCONN_IND;
1094 
1095 	cp.own_address_type = own_addr_type;
1096 	cp.channel_map = hdev->le_adv_channel_map;
1097 
1098 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1099 
1100 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1101 }
1102 
1103 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1104 {
1105 	size_t short_len;
1106 	size_t complete_len;
1107 
1108 	/* no space left for name (+ NULL + type + len) */
1109 	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1110 		return ad_len;
1111 
1112 	/* use complete name if present and fits */
1113 	complete_len = strlen(hdev->dev_name);
1114 	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1115 		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1116 				       hdev->dev_name, complete_len + 1);
1117 
1118 	/* use short name if present */
1119 	short_len = strlen(hdev->short_name);
1120 	if (short_len)
1121 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1122 				       hdev->short_name, short_len + 1);
1123 
1124 	/* use a shortened version of the full name if present; we already
1125 	 * know that the name is longer than HCI_MAX_SHORT_NAME_LENGTH
1126 	 */
1127 	if (complete_len) {
1128 		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1129 
1130 		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1131 		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1132 
1133 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1134 				       sizeof(name));
1135 	}
1136 
1137 	return ad_len;
1138 }
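
/* Illustrative note (assumption): eir_append_data() is expected to produce a
 * { length, type, data } AD structure, so a device named "bluez" would add
 * the following bytes to the scan response data:
 *
 *	06 09 62 6c 75 65 7a
 *
 * where 0x06 is the length (1 type byte + strlen("bluez")) and 0x09 is
 * EIR_NAME_COMPLETE.
 */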
1139 
1140 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1141 {
1142 	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1143 }
1144 
1145 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1146 {
1147 	u8 scan_rsp_len = 0;
1148 
1149 	if (hdev->appearance) {
1150 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1151 	}
1152 
1153 	return append_local_name(hdev, ptr, scan_rsp_len);
1154 }
1155 
1156 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1157 					u8 *ptr)
1158 {
1159 	struct adv_info *adv_instance;
1160 	u32 instance_flags;
1161 	u8 scan_rsp_len = 0;
1162 
1163 	adv_instance = hci_find_adv_instance(hdev, instance);
1164 	if (!adv_instance)
1165 		return 0;
1166 
1167 	instance_flags = adv_instance->flags;
1168 
1169 	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1170 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1171 	}
1172 
1173 	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1174 	       adv_instance->scan_rsp_len);
1175 
1176 	scan_rsp_len += adv_instance->scan_rsp_len;
1177 
1178 	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1179 		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1180 
1181 	return scan_rsp_len;
1182 }
1183 
1184 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1185 {
1186 	struct hci_dev *hdev = req->hdev;
1187 	u8 len;
1188 
1189 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1190 		return;
1191 
1192 	if (ext_adv_capable(hdev)) {
1193 		struct hci_cp_le_set_ext_scan_rsp_data cp;
1194 
1195 		memset(&cp, 0, sizeof(cp));
1196 
1197 		if (instance)
1198 			len = create_instance_scan_rsp_data(hdev, instance,
1199 							    cp.data);
1200 		else
1201 			len = create_default_scan_rsp_data(hdev, cp.data);
1202 
1203 		if (hdev->scan_rsp_data_len == len &&
1204 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1205 			return;
1206 
1207 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1208 		hdev->scan_rsp_data_len = len;
1209 
1210 		cp.handle = 0;
1211 		cp.length = len;
1212 		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1213 		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1214 
1215 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1216 			    &cp);
1217 	} else {
1218 		struct hci_cp_le_set_scan_rsp_data cp;
1219 
1220 		memset(&cp, 0, sizeof(cp));
1221 
1222 		if (instance)
1223 			len = create_instance_scan_rsp_data(hdev, instance,
1224 							    cp.data);
1225 		else
1226 			len = create_default_scan_rsp_data(hdev, cp.data);
1227 
1228 		if (hdev->scan_rsp_data_len == len &&
1229 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1230 			return;
1231 
1232 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1233 		hdev->scan_rsp_data_len = len;
1234 
1235 		cp.length = len;
1236 
1237 		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1238 	}
1239 }
1240 
1241 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1242 {
1243 	struct adv_info *adv_instance = NULL;
1244 	u8 ad_len = 0, flags = 0;
1245 	u32 instance_flags;
1246 
1247 	/* Return 0 when the current instance identifier is invalid. */
1248 	if (instance) {
1249 		adv_instance = hci_find_adv_instance(hdev, instance);
1250 		if (!adv_instance)
1251 			return 0;
1252 	}
1253 
1254 	instance_flags = get_adv_instance_flags(hdev, instance);
1255 
1256 	/* The Add Advertising command allows userspace to set both the general
1257 	 * and limited discoverable flags.
1258 	 */
1259 	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1260 		flags |= LE_AD_GENERAL;
1261 
1262 	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1263 		flags |= LE_AD_LIMITED;
1264 
1265 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1266 		flags |= LE_AD_NO_BREDR;
1267 
1268 	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1269 		/* If a discovery flag wasn't provided, simply use the global
1270 		 * settings.
1271 		 */
1272 		if (!flags)
1273 			flags |= mgmt_get_adv_discov_flags(hdev);
1274 
1275 		/* If flags would still be empty, then there is no need to
1276 		 * include the "Flags" AD field.
1277 		 */
1278 		if (flags) {
1279 			ptr[0] = 0x02;
1280 			ptr[1] = EIR_FLAGS;
1281 			ptr[2] = flags;
1282 
1283 			ad_len += 3;
1284 			ptr += 3;
1285 		}
1286 	}
1287 
1288 	if (adv_instance) {
1289 		memcpy(ptr, adv_instance->adv_data,
1290 		       adv_instance->adv_data_len);
1291 		ad_len += adv_instance->adv_data_len;
1292 		ptr += adv_instance->adv_data_len;
1293 	}
1294 
1295 	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1296 		s8 adv_tx_power;
1297 
1298 		if (ext_adv_capable(hdev)) {
1299 			if (adv_instance)
1300 				adv_tx_power = adv_instance->tx_power;
1301 			else
1302 				adv_tx_power = hdev->adv_tx_power;
1303 		} else {
1304 			adv_tx_power = hdev->adv_tx_power;
1305 		}
1306 
1307 		/* Provide Tx Power only if we can provide a valid value for it */
1308 		if (adv_tx_power != HCI_TX_POWER_INVALID) {
1309 			ptr[0] = 0x02;
1310 			ptr[1] = EIR_TX_POWER;
1311 			ptr[2] = (u8)adv_tx_power;
1312 
1313 			ad_len += 3;
1314 			ptr += 3;
1315 		}
1316 	}
1317 
1318 	return ad_len;
1319 }
1320 
1321 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1322 {
1323 	struct hci_dev *hdev = req->hdev;
1324 	u8 len;
1325 
1326 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1327 		return;
1328 
1329 	if (ext_adv_capable(hdev)) {
1330 		struct hci_cp_le_set_ext_adv_data cp;
1331 
1332 		memset(&cp, 0, sizeof(cp));
1333 
1334 		len = create_instance_adv_data(hdev, instance, cp.data);
1335 
1336 		/* There's nothing to do if the data hasn't changed */
1337 		if (hdev->adv_data_len == len &&
1338 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1339 			return;
1340 
1341 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1342 		hdev->adv_data_len = len;
1343 
1344 		cp.length = len;
1345 		cp.handle = 0;
1346 		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1347 		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1348 
1349 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1350 	} else {
1351 		struct hci_cp_le_set_adv_data cp;
1352 
1353 		memset(&cp, 0, sizeof(cp));
1354 
1355 		len = create_instance_adv_data(hdev, instance, cp.data);
1356 
1357 		/* There's nothing to do if the data hasn't changed */
1358 		if (hdev->adv_data_len == len &&
1359 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1360 			return;
1361 
1362 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1363 		hdev->adv_data_len = len;
1364 
1365 		cp.length = len;
1366 
1367 		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1368 	}
1369 }
1370 
1371 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1372 {
1373 	struct hci_request req;
1374 
1375 	hci_req_init(&req, hdev);
1376 	__hci_req_update_adv_data(&req, instance);
1377 
1378 	return hci_req_run(&req, NULL);
1379 }
1380 
1381 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1382 {
1383 	BT_DBG("%s status %u", hdev->name, status);
1384 }
1385 
1386 void hci_req_reenable_advertising(struct hci_dev *hdev)
1387 {
1388 	struct hci_request req;
1389 
1390 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1391 	    list_empty(&hdev->adv_instances))
1392 		return;
1393 
1394 	hci_req_init(&req, hdev);
1395 
1396 	if (hdev->cur_adv_instance) {
1397 		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1398 						true);
1399 	} else {
1400 		if (ext_adv_capable(hdev)) {
1401 			__hci_req_start_ext_adv(&req, 0x00);
1402 		} else {
1403 			__hci_req_update_adv_data(&req, 0x00);
1404 			__hci_req_update_scan_rsp_data(&req, 0x00);
1405 			__hci_req_enable_advertising(&req);
1406 		}
1407 	}
1408 
1409 	hci_req_run(&req, adv_enable_complete);
1410 }
1411 
1412 static void adv_timeout_expire(struct work_struct *work)
1413 {
1414 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1415 					    adv_instance_expire.work);
1416 
1417 	struct hci_request req;
1418 	u8 instance;
1419 
1420 	BT_DBG("%s", hdev->name);
1421 
1422 	hci_dev_lock(hdev);
1423 
1424 	hdev->adv_instance_timeout = 0;
1425 
1426 	instance = hdev->cur_adv_instance;
1427 	if (instance == 0x00)
1428 		goto unlock;
1429 
1430 	hci_req_init(&req, hdev);
1431 
1432 	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1433 
1434 	if (list_empty(&hdev->adv_instances))
1435 		__hci_req_disable_advertising(&req);
1436 
1437 	hci_req_run(&req, NULL);
1438 
1439 unlock:
1440 	hci_dev_unlock(hdev);
1441 }
1442 
1443 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1444 			   bool use_rpa, struct adv_info *adv_instance,
1445 			   u8 *own_addr_type, bdaddr_t *rand_addr)
1446 {
1447 	int err;
1448 
1449 	bacpy(rand_addr, BDADDR_ANY);
1450 
1451 	/* If privacy is enabled use a resolvable private address. If
1452 	 * current RPA has expired then generate a new one.
1453 	 */
1454 	if (use_rpa) {
1455 		int to;
1456 
1457 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1458 
1459 		if (adv_instance) {
1460 			if (!adv_instance->rpa_expired &&
1461 			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
1462 				return 0;
1463 
1464 			adv_instance->rpa_expired = false;
1465 		} else {
1466 			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1467 			    !bacmp(&hdev->random_addr, &hdev->rpa))
1468 				return 0;
1469 		}
1470 
1471 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1472 		if (err < 0) {
1473 			BT_ERR("%s failed to generate new RPA", hdev->name);
1474 			return err;
1475 		}
1476 
1477 		bacpy(rand_addr, &hdev->rpa);
1478 
1479 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1480 		if (adv_instance)
1481 			queue_delayed_work(hdev->workqueue,
1482 					   &adv_instance->rpa_expired_cb, to);
1483 		else
1484 			queue_delayed_work(hdev->workqueue,
1485 					   &hdev->rpa_expired, to);
1486 
1487 		return 0;
1488 	}
1489 
1490 	/* In case of required privacy without resolvable private address,
1491 	 * use a non-resolvable private address. This is useful for
1492 	 * non-connectable advertising.
1493 	 */
1494 	if (require_privacy) {
1495 		bdaddr_t nrpa;
1496 
1497 		while (true) {
1498 			/* The non-resolvable private address is generated
1499 			 * from six random bytes with the two most significant
1500 			 * bits cleared.
1501 			 */
1502 			get_random_bytes(&nrpa, 6);
1503 			nrpa.b[5] &= 0x3f;
1504 
1505 			/* The non-resolvable private address shall not be
1506 			 * equal to the public address.
1507 			 */
1508 			if (bacmp(&hdev->bdaddr, &nrpa))
1509 				break;
1510 		}
1511 
1512 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1513 		bacpy(rand_addr, &nrpa);
1514 
1515 		return 0;
1516 	}
1517 
1518 	/* No privacy so use a public address. */
1519 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1520 
1521 	return 0;
1522 }
1523 
1524 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1525 {
1526 	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1527 }
1528 
1529 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1530 {
1531 	struct hci_cp_le_set_ext_adv_params cp;
1532 	struct hci_dev *hdev = req->hdev;
1533 	bool connectable;
1534 	u32 flags;
1535 	bdaddr_t random_addr;
1536 	u8 own_addr_type;
1537 	int err;
1538 	struct adv_info *adv_instance;
1539 	bool secondary_adv;
1540 	/* In the ext adv Set Parameters command the interval is 3 octets */
1541 	const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1542 
1543 	if (instance > 0) {
1544 		adv_instance = hci_find_adv_instance(hdev, instance);
1545 		if (!adv_instance)
1546 			return -EINVAL;
1547 	} else {
1548 		adv_instance = NULL;
1549 	}
1550 
1551 	flags = get_adv_instance_flags(hdev, instance);
1552 
1553 	/* If the "connectable" instance flag was not set, then choose between
1554 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1555 	 */
1556 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1557 		      mgmt_get_connectable(hdev);
1558 
1559 	if (!is_advertising_allowed(hdev, connectable))
1560 		return -EPERM;
1561 
1562 	/* Set require_privacy to true only when non-connectable
1563 	 * advertising is used. In that case it is fine to use a
1564 	 * non-resolvable private address.
1565 	 */
1566 	err = hci_get_random_address(hdev, !connectable,
1567 				     adv_use_rpa(hdev, flags), adv_instance,
1568 				     &own_addr_type, &random_addr);
1569 	if (err < 0)
1570 		return err;
1571 
1572 	memset(&cp, 0, sizeof(cp));
1573 
1574 	memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1575 	memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
1576 
1577 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1578 
1579 	if (connectable) {
1580 		if (secondary_adv)
1581 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1582 		else
1583 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1584 	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1585 		if (secondary_adv)
1586 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1587 		else
1588 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1589 	} else {
1590 		if (secondary_adv)
1591 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1592 		else
1593 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1594 	}
1595 
1596 	cp.own_addr_type = own_addr_type;
1597 	cp.channel_map = hdev->le_adv_channel_map;
1598 	cp.tx_power = 127;
1599 	cp.handle = 0;
1600 
1601 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1602 		cp.primary_phy = HCI_ADV_PHY_1M;
1603 		cp.secondary_phy = HCI_ADV_PHY_2M;
1604 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1605 		cp.primary_phy = HCI_ADV_PHY_CODED;
1606 		cp.secondary_phy = HCI_ADV_PHY_CODED;
1607 	} else {
1608 		/* In all other cases use 1M */
1609 		cp.primary_phy = HCI_ADV_PHY_1M;
1610 		cp.secondary_phy = HCI_ADV_PHY_1M;
1611 	}
1612 
1613 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1614 
1615 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1616 	    bacmp(&random_addr, BDADDR_ANY)) {
1617 		struct hci_cp_le_set_adv_set_rand_addr cp;
1618 
1619 		/* Check if random address need to be updated */
1620 		if (adv_instance) {
1621 			if (!bacmp(&random_addr, &adv_instance->random_addr))
1622 				return 0;
1623 		} else {
1624 			if (!bacmp(&random_addr, &hdev->random_addr))
1625 				return 0;
1626 		}
1627 
1628 		memset(&cp, 0, sizeof(cp));
1629 
1630 		cp.handle = 0;
1631 		bacpy(&cp.bdaddr, &random_addr);
1632 
1633 		hci_req_add(req,
1634 			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1635 			    sizeof(cp), &cp);
1636 	}
1637 
1638 	return 0;
1639 }
1640 
1641 void __hci_req_enable_ext_advertising(struct hci_request *req)
1642 {
1643 	struct hci_cp_le_set_ext_adv_enable *cp;
1644 	struct hci_cp_ext_adv_set *adv_set;
1645 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1646 
1647 	cp = (void *) data;
1648 	adv_set = (void *) cp->data;
1649 
1650 	memset(cp, 0, sizeof(*cp));
1651 
1652 	cp->enable = 0x01;
1653 	cp->num_of_sets = 0x01;
1654 
1655 	memset(adv_set, 0, sizeof(*adv_set));
1656 
1657 	adv_set->handle = 0;
1658 
1659 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1660 		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1661 		    data);
1662 }
1663 
1664 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1665 {
1666 	struct hci_dev *hdev = req->hdev;
1667 	int err;
1668 
1669 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1670 		__hci_req_disable_advertising(req);
1671 
1672 	err = __hci_req_setup_ext_adv_instance(req, instance);
1673 	if (err < 0)
1674 		return err;
1675 
1676 	__hci_req_update_scan_rsp_data(req, instance);
1677 	__hci_req_enable_ext_advertising(req);
1678 
1679 	return 0;
1680 }
1681 
1682 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1683 				    bool force)
1684 {
1685 	struct hci_dev *hdev = req->hdev;
1686 	struct adv_info *adv_instance = NULL;
1687 	u16 timeout;
1688 
1689 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1690 	    list_empty(&hdev->adv_instances))
1691 		return -EPERM;
1692 
1693 	if (hdev->adv_instance_timeout)
1694 		return -EBUSY;
1695 
1696 	adv_instance = hci_find_adv_instance(hdev, instance);
1697 	if (!adv_instance)
1698 		return -ENOENT;
1699 
1700 	/* A zero timeout means unlimited advertising. As long as there is
1701 	 * only one instance, duration should be ignored. We still set a timeout
1702 	 * in case further instances are being added later on.
1703 	 *
1704 	 * If the remaining lifetime of the instance is more than the duration
1705 	 * then the timeout corresponds to the duration, otherwise it will be
1706 	 * reduced to the remaining instance lifetime.
1707 	 */
1708 	if (adv_instance->timeout == 0 ||
1709 	    adv_instance->duration <= adv_instance->remaining_time)
1710 		timeout = adv_instance->duration;
1711 	else
1712 		timeout = adv_instance->remaining_time;
1713 
1714 	/* The remaining time is being reduced unless the instance is being
1715 	 * advertised without time limit.
1716 	 */
1717 	if (adv_instance->timeout)
1718 		adv_instance->remaining_time =
1719 				adv_instance->remaining_time - timeout;
1720 
1721 	hdev->adv_instance_timeout = timeout;
1722 	queue_delayed_work(hdev->req_workqueue,
1723 			   &hdev->adv_instance_expire,
1724 			   msecs_to_jiffies(timeout * 1000));
1725 
1726 	/* If we're just re-scheduling the same instance again then do not
1727 	 * execute any HCI commands. This happens when a single instance is
1728 	 * being advertised.
1729 	 */
1730 	if (!force && hdev->cur_adv_instance == instance &&
1731 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
1732 		return 0;
1733 
1734 	hdev->cur_adv_instance = instance;
1735 	if (ext_adv_capable(hdev)) {
1736 		__hci_req_start_ext_adv(req, instance);
1737 	} else {
1738 		__hci_req_update_adv_data(req, instance);
1739 		__hci_req_update_scan_rsp_data(req, instance);
1740 		__hci_req_enable_advertising(req);
1741 	}
1742 
1743 	return 0;
1744 }
1745 
1746 static void cancel_adv_timeout(struct hci_dev *hdev)
1747 {
1748 	if (hdev->adv_instance_timeout) {
1749 		hdev->adv_instance_timeout = 0;
1750 		cancel_delayed_work(&hdev->adv_instance_expire);
1751 	}
1752 }
1753 
1754 /* For a single instance:
1755  * - force == true: The instance will be removed even when its remaining
1756  *   lifetime is not zero.
1757  * - force == false: the instance will be deactivated but kept stored unless
1758  *   the remaining lifetime is zero.
1759  *
1760  * For instance == 0x00:
1761  * - force == true: All instances will be removed regardless of their timeout
1762  *   setting.
1763  * - force == false: Only instances that have a timeout will be removed.
1764  */
1765 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1766 				struct hci_request *req, u8 instance,
1767 				bool force)
1768 {
1769 	struct adv_info *adv_instance, *n, *next_instance = NULL;
1770 	int err;
1771 	u8 rem_inst;
1772 
1773 	/* Cancel any timeout concerning the removed instance(s). */
1774 	if (!instance || hdev->cur_adv_instance == instance)
1775 		cancel_adv_timeout(hdev);
1776 
1777 	/* Get the next instance to advertise BEFORE we remove
1778 	 * the current one. This can be the same instance again
1779 	 * if there is only one instance.
1780 	 */
1781 	if (instance && hdev->cur_adv_instance == instance)
1782 		next_instance = hci_get_next_instance(hdev, instance);
1783 
1784 	if (instance == 0x00) {
1785 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1786 					 list) {
1787 			if (!(force || adv_instance->timeout))
1788 				continue;
1789 
1790 			rem_inst = adv_instance->instance;
1791 			err = hci_remove_adv_instance(hdev, rem_inst);
1792 			if (!err)
1793 				mgmt_advertising_removed(sk, hdev, rem_inst);
1794 		}
1795 	} else {
1796 		adv_instance = hci_find_adv_instance(hdev, instance);
1797 
1798 		if (force || (adv_instance && adv_instance->timeout &&
1799 			      !adv_instance->remaining_time)) {
1800 			/* Don't advertise a removed instance. */
1801 			if (next_instance &&
1802 			    next_instance->instance == instance)
1803 				next_instance = NULL;
1804 
1805 			err = hci_remove_adv_instance(hdev, instance);
1806 			if (!err)
1807 				mgmt_advertising_removed(sk, hdev, instance);
1808 		}
1809 	}
1810 
1811 	if (!req || !hdev_is_powered(hdev) ||
1812 	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
1813 		return;
1814 
1815 	if (next_instance)
1816 		__hci_req_schedule_adv_instance(req, next_instance->instance,
1817 						false);
1818 }
1819 
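/* Queue an LE Set Random Address command for @rpa. If advertising or an
 * LE connection attempt is in progress, the update is deferred by marking
 * the RPA as expired so that it is picked up on the next cycle.
 */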
1820 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1821 {
1822 	struct hci_dev *hdev = req->hdev;
1823 
1824 	/* If we're advertising or initiating an LE connection we can't
1825 	 * go ahead and change the random address at this time. This is
1826 	 * because the eventual initiator address used for the
1827 	 * subsequently created connection will be undefined (some
1828 	 * controllers use the new address and others the one we had
1829 	 * when the operation started).
1830 	 *
1831 	 * In this kind of scenario skip the update and let the random
1832 	 * address be updated at the next cycle.
1833 	 */
1834 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1835 	    hci_lookup_le_connect(hdev)) {
1836 		BT_DBG("Deferring random address update");
1837 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1838 		return;
1839 	}
1840 
1841 	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1842 }
1843 
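/* Pick the own address type for @req and queue the HCI command to update
 * the random address when needed: a resolvable private address when
 * use_rpa is set, a non-resolvable private address when privacy is
 * required without an RPA, the static random address when it is forced or
 * no public address is available, and the public address otherwise.
 */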
1844 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1845 			      bool use_rpa, u8 *own_addr_type)
1846 {
1847 	struct hci_dev *hdev = req->hdev;
1848 	int err;
1849 
1850 	/* If privacy is enabled use a resolvable private address. If
1851 	 * current RPA has expired or there is something other than
1852 	 * the current RPA in use, then generate a new one.
1853 	 */
1854 	if (use_rpa) {
1855 		int to;
1856 
1857 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1858 
1859 		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1860 		    !bacmp(&hdev->random_addr, &hdev->rpa))
1861 			return 0;
1862 
1863 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1864 		if (err < 0) {
1865 			bt_dev_err(hdev, "failed to generate new RPA");
1866 			return err;
1867 		}
1868 
1869 		set_random_addr(req, &hdev->rpa);
1870 
1871 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1872 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1873 
1874 		return 0;
1875 	}
1876 
1877 	/* In case of required privacy without resolvable private address,
1878 	 * use a non-resolvable private address. This is useful for active
1879 	 * scanning and non-connectable advertising.
1880 	 */
1881 	if (require_privacy) {
1882 		bdaddr_t nrpa;
1883 
1884 		while (true) {
1885 			/* The non-resolvable private address is generated
1886 			 * from random six bytes with the two most significant
1887 			 * bits cleared.
1888 			 */
1889 			get_random_bytes(&nrpa, 6);
1890 			nrpa.b[5] &= 0x3f;
1891 
1892 			/* The non-resolvable private address shall not be
1893 			 * equal to the public address.
1894 			 */
1895 			if (bacmp(&hdev->bdaddr, &nrpa))
1896 				break;
1897 		}
1898 
1899 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1900 		set_random_addr(req, &nrpa);
1901 		return 0;
1902 	}
1903 
1904 	/* If forcing static address is in use or there is no public
1905 	 * address, use the static address as random address (but skip
1906 	 * the HCI command if the current random address is already the
1907 	 * static one).
1908 	 *
1909 	 * In case BR/EDR has been disabled on a dual-mode controller
1910 	 * and a static address has been configured, then use that
1911 	 * address instead of the public BR/EDR address.
1912 	 */
1913 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1914 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1915 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1916 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
1917 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1918 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
1919 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1920 				    &hdev->static_addr);
1921 		return 0;
1922 	}
1923 
1924 	/* Neither privacy nor static address is being used so use a
1925 	 * public address.
1926 	 */
1927 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1928 
1929 	return 0;
1930 }
1931 
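/* Return true if any BR/EDR whitelist entry lacks a fully established ACL
 * connection, in which case page scan must remain enabled.
 */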
1932 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1933 {
1934 	struct bdaddr_list *b;
1935 
1936 	list_for_each_entry(b, &hdev->whitelist, list) {
1937 		struct hci_conn *conn;
1938 
1939 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1940 		if (!conn)
1941 			return true;
1942 
1943 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1944 			return true;
1945 	}
1946 
1947 	return false;
1948 }
1949 
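/* Recompute the BR/EDR scan mode (page and inquiry scan) from the current
 * connectable/discoverable state and the whitelist, and queue a Write Scan
 * Enable command only if the setting actually changes.
 */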
1950 void __hci_req_update_scan(struct hci_request *req)
1951 {
1952 	struct hci_dev *hdev = req->hdev;
1953 	u8 scan;
1954 
1955 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1956 		return;
1957 
1958 	if (!hdev_is_powered(hdev))
1959 		return;
1960 
1961 	if (mgmt_powering_down(hdev))
1962 		return;
1963 
1964 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1965 	    disconnected_whitelist_entries(hdev))
1966 		scan = SCAN_PAGE;
1967 	else
1968 		scan = SCAN_DISABLED;
1969 
1970 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1971 		scan |= SCAN_INQUIRY;
1972 
1973 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1974 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1975 		return;
1976 
1977 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1978 }
1979 
1980 static int update_scan(struct hci_request *req, unsigned long opt)
1981 {
1982 	hci_dev_lock(req->hdev);
1983 	__hci_req_update_scan(req);
1984 	hci_dev_unlock(req->hdev);
1985 	return 0;
1986 }
1987 
1988 static void scan_update_work(struct work_struct *work)
1989 {
1990 	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1991 
1992 	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1993 }
1994 
1995 static int connectable_update(struct hci_request *req, unsigned long opt)
1996 {
1997 	struct hci_dev *hdev = req->hdev;
1998 
1999 	hci_dev_lock(hdev);
2000 
2001 	__hci_req_update_scan(req);
2002 
2003 	/* If BR/EDR is not enabled and we disable advertising as a
2004 	 * by-product of disabling connectable, we need to update the
2005 	 * advertising flags.
2006 	 */
2007 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2008 		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
2009 
2010 	/* Update the advertising parameters if necessary */
2011 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2012 	    !list_empty(&hdev->adv_instances)) {
2013 		if (ext_adv_capable(hdev))
2014 			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2015 		else
2016 			__hci_req_enable_advertising(req);
2017 	}
2018 
2019 	__hci_update_background_scan(req);
2020 
2021 	hci_dev_unlock(hdev);
2022 
2023 	return 0;
2024 }
2025 
2026 static void connectable_update_work(struct work_struct *work)
2027 {
2028 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2029 					    connectable_update);
2030 	u8 status;
2031 
2032 	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2033 	mgmt_set_connectable_complete(hdev, status);
2034 }
2035 
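/* OR together the service class hints of all registered UUIDs to form the
 * service class octet of the Class of Device.
 */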
2036 static u8 get_service_classes(struct hci_dev *hdev)
2037 {
2038 	struct bt_uuid *uuid;
2039 	u8 val = 0;
2040 
2041 	list_for_each_entry(uuid, &hdev->uuids, list)
2042 		val |= uuid->svc_hint;
2043 
2044 	return val;
2045 }
2046 
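/* Rebuild the Class of Device from the major/minor class and the service
 * class hints and queue a Write Class of Device command if it changed.
 */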
2047 void __hci_req_update_class(struct hci_request *req)
2048 {
2049 	struct hci_dev *hdev = req->hdev;
2050 	u8 cod[3];
2051 
2052 	BT_DBG("%s", hdev->name);
2053 
2054 	if (!hdev_is_powered(hdev))
2055 		return;
2056 
2057 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2058 		return;
2059 
2060 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2061 		return;
2062 
2063 	cod[0] = hdev->minor_class;
2064 	cod[1] = hdev->major_class;
2065 	cod[2] = get_service_classes(hdev);
2066 
2067 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2068 		cod[1] |= 0x20;
2069 
2070 	if (memcmp(cod, hdev->dev_class, 3) == 0)
2071 		return;
2072 
2073 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2074 }
2075 
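/* Queue a Write Current IAC LAP command: LIAC and GIAC in limited
 * discoverable mode, GIAC alone in general discoverable mode.
 */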
2076 static void write_iac(struct hci_request *req)
2077 {
2078 	struct hci_dev *hdev = req->hdev;
2079 	struct hci_cp_write_current_iac_lap cp;
2080 
2081 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2082 		return;
2083 
2084 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2085 		/* Limited discoverable mode */
2086 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
2087 		cp.iac_lap[0] = 0x00;	/* LIAC */
2088 		cp.iac_lap[1] = 0x8b;
2089 		cp.iac_lap[2] = 0x9e;
2090 		cp.iac_lap[3] = 0x33;	/* GIAC */
2091 		cp.iac_lap[4] = 0x8b;
2092 		cp.iac_lap[5] = 0x9e;
2093 	} else {
2094 		/* General discoverable mode */
2095 		cp.num_iac = 1;
2096 		cp.iac_lap[0] = 0x33;	/* GIAC */
2097 		cp.iac_lap[1] = 0x8b;
2098 		cp.iac_lap[2] = 0x9e;
2099 	}
2100 
2101 	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2102 		    (cp.num_iac * 3) + 1, &cp);
2103 }
2104 
2105 static int discoverable_update(struct hci_request *req, unsigned long opt)
2106 {
2107 	struct hci_dev *hdev = req->hdev;
2108 
2109 	hci_dev_lock(hdev);
2110 
2111 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2112 		write_iac(req);
2113 		__hci_req_update_scan(req);
2114 		__hci_req_update_class(req);
2115 	}
2116 
2117 	/* Advertising instances don't use the global discoverable setting, so
2118 	 * only update AD if advertising was enabled using Set Advertising.
2119 	 */
2120 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2121 		__hci_req_update_adv_data(req, 0x00);
2122 
2123 		/* Discoverable mode affects the local advertising
2124 		 * address in limited privacy mode.
2125 		 */
2126 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2127 			if (ext_adv_capable(hdev))
2128 				__hci_req_start_ext_adv(req, 0x00);
2129 			else
2130 				__hci_req_enable_advertising(req);
2131 		}
2132 	}
2133 
2134 	hci_dev_unlock(hdev);
2135 
2136 	return 0;
2137 }
2138 
2139 static void discoverable_update_work(struct work_struct *work)
2140 {
2141 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2142 					    discoverable_update);
2143 	u8 status;
2144 
2145 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2146 	mgmt_set_discoverable_complete(hdev, status);
2147 }
2148 
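/* Queue the HCI command needed to abort @conn based on its state:
 * disconnect an established link, cancel an outgoing connection attempt,
 * or reject an incoming one.
 */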
2149 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2150 		      u8 reason)
2151 {
2152 	switch (conn->state) {
2153 	case BT_CONNECTED:
2154 	case BT_CONFIG:
2155 		if (conn->type == AMP_LINK) {
2156 			struct hci_cp_disconn_phy_link cp;
2157 
2158 			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2159 			cp.reason = reason;
2160 			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2161 				    &cp);
2162 		} else {
2163 			struct hci_cp_disconnect dc;
2164 
2165 			dc.handle = cpu_to_le16(conn->handle);
2166 			dc.reason = reason;
2167 			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2168 		}
2169 
2170 		conn->state = BT_DISCONN;
2171 
2172 		break;
2173 	case BT_CONNECT:
2174 		if (conn->type == LE_LINK) {
2175 			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2176 				break;
2177 			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2178 				    0, NULL);
2179 		} else if (conn->type == ACL_LINK) {
2180 			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2181 				break;
2182 			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2183 				    6, &conn->dst);
2184 		}
2185 		break;
2186 	case BT_CONNECT2:
2187 		if (conn->type == ACL_LINK) {
2188 			struct hci_cp_reject_conn_req rej;
2189 
2190 			bacpy(&rej.bdaddr, &conn->dst);
2191 			rej.reason = reason;
2192 
2193 			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2194 				    sizeof(rej), &rej);
2195 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2196 			struct hci_cp_reject_sync_conn_req rej;
2197 
2198 			bacpy(&rej.bdaddr, &conn->dst);
2199 
2200 			/* SCO rejection has its own limited set of
2201 			 * allowed error values (0x0D-0x0F) which isn't
2202 			 * compatible with most values passed to this
2203 			 * function. To be safe, hard-code one of the
2204 			 * values that's suitable for SCO.
2205 			 */
2206 			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2207 
2208 			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2209 				    sizeof(rej), &rej);
2210 		}
2211 		break;
2212 	default:
2213 		conn->state = BT_CLOSED;
2214 		break;
2215 	}
2216 }
2217 
2218 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2219 {
2220 	if (status)
2221 		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2222 }
2223 
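/* Build and run a request that aborts @conn. An empty request (-ENODATA)
 * is not treated as an error.
 */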
2224 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2225 {
2226 	struct hci_request req;
2227 	int err;
2228 
2229 	hci_req_init(&req, conn->hdev);
2230 
2231 	__hci_abort_conn(&req, conn, reason);
2232 
2233 	err = hci_req_run(&req, abort_conn_complete);
2234 	if (err && err != -ENODATA) {
2235 		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2236 		return err;
2237 	}
2238 
2239 	return 0;
2240 }
2241 
2242 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2243 {
2244 	hci_dev_lock(req->hdev);
2245 	__hci_update_background_scan(req);
2246 	hci_dev_unlock(req->hdev);
2247 	return 0;
2248 }
2249 
2250 static void bg_scan_update(struct work_struct *work)
2251 {
2252 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2253 					    bg_scan_update);
2254 	struct hci_conn *conn;
2255 	u8 status;
2256 	int err;
2257 
2258 	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2259 	if (!err)
2260 		return;
2261 
2262 	hci_dev_lock(hdev);
2263 
2264 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2265 	if (conn)
2266 		hci_le_conn_failed(conn, status);
2267 
2268 	hci_dev_unlock(hdev);
2269 }
2270 
2271 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2272 {
2273 	hci_req_add_le_scan_disable(req);
2274 	return 0;
2275 }
2276 
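/* Flush the inquiry cache and queue an Inquiry command, using the LIAC
 * for limited discovery and the GIAC otherwise. @opt carries the inquiry
 * length.
 */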
2277 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2278 {
2279 	u8 length = opt;
2280 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2281 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2282 	struct hci_cp_inquiry cp;
2283 
2284 	BT_DBG("%s", req->hdev->name);
2285 
2286 	hci_dev_lock(req->hdev);
2287 	hci_inquiry_cache_flush(req->hdev);
2288 	hci_dev_unlock(req->hdev);
2289 
2290 	memset(&cp, 0, sizeof(cp));
2291 
2292 	if (req->hdev->discovery.limited)
2293 		memcpy(&cp.lap, liac, sizeof(cp.lap));
2294 	else
2295 		memcpy(&cp.lap, giac, sizeof(cp.lap));
2296 
2297 	cp.length = length;
2298 
2299 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2300 
2301 	return 0;
2302 }
2303 
2304 static void le_scan_disable_work(struct work_struct *work)
2305 {
2306 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2307 					    le_scan_disable.work);
2308 	u8 status;
2309 
2310 	BT_DBG("%s", hdev->name);
2311 
2312 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2313 		return;
2314 
2315 	cancel_delayed_work(&hdev->le_scan_restart);
2316 
2317 	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2318 	if (status) {
2319 		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2320 			   status);
2321 		return;
2322 	}
2323 
2324 	hdev->discovery.scan_start = 0;
2325 
2326 	/* If we were running LE only scan, change discovery state. If
2327 	 * we were running both LE and BR/EDR inquiry simultaneously,
2328 	 * and BR/EDR inquiry is already finished, stop discovery,
2329 	 * otherwise BR/EDR inquiry will stop discovery when finished.
2330 	 * If we are resolving a remote device name, do not change the
2331 	 * discovery state.
2332 	 */
2333 
2334 	if (hdev->discovery.type == DISCOV_TYPE_LE)
2335 		goto discov_stopped;
2336 
2337 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2338 		return;
2339 
2340 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2341 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2342 		    hdev->discovery.state != DISCOVERY_RESOLVING)
2343 			goto discov_stopped;
2344 
2345 		return;
2346 	}
2347 
2348 	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2349 		     HCI_CMD_TIMEOUT, &status);
2350 	if (status) {
2351 		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2352 		goto discov_stopped;
2353 	}
2354 
2355 	return;
2356 
2357 discov_stopped:
2358 	hci_dev_lock(hdev);
2359 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2360 	hci_dev_unlock(hdev);
2361 }
2362 
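/* Restart a running LE scan by queueing a scan disable followed by a scan
 * enable, using the extended scan commands when the controller supports
 * them.
 */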
2363 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2364 {
2365 	struct hci_dev *hdev = req->hdev;
2366 
2367 	/* If controller is not scanning we are done. */
2368 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2369 		return 0;
2370 
2371 	hci_req_add_le_scan_disable(req);
2372 
2373 	if (use_ext_scan(hdev)) {
2374 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2375 
2376 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2377 		ext_enable_cp.enable = LE_SCAN_ENABLE;
2378 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2379 
2380 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2381 			    sizeof(ext_enable_cp), &ext_enable_cp);
2382 	} else {
2383 		struct hci_cp_le_set_scan_enable cp;
2384 
2385 		memset(&cp, 0, sizeof(cp));
2386 		cp.enable = LE_SCAN_ENABLE;
2387 		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2388 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2389 	}
2390 
2391 	return 0;
2392 }
2393 
2394 static void le_scan_restart_work(struct work_struct *work)
2395 {
2396 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2397 					    le_scan_restart.work);
2398 	unsigned long timeout, duration, scan_start, now;
2399 	u8 status;
2400 
2401 	BT_DBG("%s", hdev->name);
2402 
2403 	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2404 	if (status) {
2405 		bt_dev_err(hdev, "failed to restart LE scan: status %d",
2406 			   status);
2407 		return;
2408 	}
2409 
2410 	hci_dev_lock(hdev);
2411 
2412 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2413 	    !hdev->discovery.scan_start)
2414 		goto unlock;
2415 
2416 	/* When the scan was started, hdev->le_scan_disable was queued to
2417 	 * run at scan_start + duration. During scan restart this work has
2418 	 * been canceled, so queue it again with the proper timeout to make
2419 	 * sure that the scan does not run indefinitely.
2420 	 */
2421 	duration = hdev->discovery.scan_duration;
2422 	scan_start = hdev->discovery.scan_start;
2423 	now = jiffies;
2424 	if (now - scan_start <= duration) {
2425 		int elapsed;
2426 
2427 		if (now >= scan_start)
2428 			elapsed = now - scan_start;
2429 		else
2430 			elapsed = ULONG_MAX - scan_start + now;
2431 
2432 		timeout = duration - elapsed;
2433 	} else {
2434 		timeout = 0;
2435 	}
2436 
2437 	queue_delayed_work(hdev->req_workqueue,
2438 			   &hdev->le_scan_disable, timeout);
2439 
2440 unlock:
2441 	hci_dev_unlock(hdev);
2442 }
2443 
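/* Start an active LE scan for discovery: stop any ongoing advertising and
 * background scan first, select a private (resolvable or non-resolvable)
 * own address and queue the scan commands. @opt carries the scan interval.
 */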
2444 static int active_scan(struct hci_request *req, unsigned long opt)
2445 {
2446 	uint16_t interval = opt;
2447 	struct hci_dev *hdev = req->hdev;
2448 	u8 own_addr_type;
2449 	int err;
2450 
2451 	BT_DBG("%s", hdev->name);
2452 
2453 	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2454 		hci_dev_lock(hdev);
2455 
2456 		/* Don't let discovery abort an outgoing connection attempt
2457 		 * that's using directed advertising.
2458 		 */
2459 		if (hci_lookup_le_connect(hdev)) {
2460 			hci_dev_unlock(hdev);
2461 			return -EBUSY;
2462 		}
2463 
2464 		cancel_adv_timeout(hdev);
2465 		hci_dev_unlock(hdev);
2466 
2467 		__hci_req_disable_advertising(req);
2468 	}
2469 
2470 	/* If controller is scanning, it means the background scanning is
2471 	 * running. Thus, we should temporarily stop it in order to set the
2472 	 * discovery scanning parameters.
2473 	 */
2474 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2475 		hci_req_add_le_scan_disable(req);
2476 
2477 	/* All active scans will be done with either a resolvable private
2478 	 * address (when privacy feature has been enabled) or non-resolvable
2479 	 * private address.
2480 	 */
2481 	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2482 					&own_addr_type);
2483 	if (err < 0)
2484 		own_addr_type = ADDR_LE_DEV_PUBLIC;
2485 
2486 	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2487 			   own_addr_type, 0);
2488 	return 0;
2489 }
2490 
2491 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2492 {
2493 	int err;
2494 
2495 	BT_DBG("%s", req->hdev->name);
2496 
2497 	err = active_scan(req, opt);
2498 	if (err)
2499 		return err;
2500 
2501 	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2502 }
2503 
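/* Run the HCI requests for the configured discovery type and, for the
 * LE-based types, schedule the le_scan_disable work after the chosen
 * timeout.
 */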
2504 static void start_discovery(struct hci_dev *hdev, u8 *status)
2505 {
2506 	unsigned long timeout;
2507 
2508 	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2509 
2510 	switch (hdev->discovery.type) {
2511 	case DISCOV_TYPE_BREDR:
2512 		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2513 			hci_req_sync(hdev, bredr_inquiry,
2514 				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2515 				     status);
2516 		return;
2517 	case DISCOV_TYPE_INTERLEAVED:
2518 		/* When running simultaneous discovery, the LE scanning time
2519 		 * should occupy the whole discovery time since BR/EDR inquiry
2520 		 * and LE scanning are scheduled by the controller.
2521 		 *
2522 		 * For interleaving discovery in comparison, BR/EDR inquiry
2523 		 * and LE scanning are done sequentially with separate
2524 		 * timeouts.
2525 		 */
2526 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2527 			     &hdev->quirks)) {
2528 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2529 			/* During simultaneous discovery, we double the LE scan
2530 			 * interval. We must leave some time for the controller
2531 			 * to do BR/EDR inquiry.
2532 			 */
2533 			hci_req_sync(hdev, interleaved_discov,
2534 				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2535 				     status);
2536 			break;
2537 		}
2538 
2539 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2540 		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2541 			     HCI_CMD_TIMEOUT, status);
2542 		break;
2543 	case DISCOV_TYPE_LE:
2544 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2545 		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2546 			     HCI_CMD_TIMEOUT, status);
2547 		break;
2548 	default:
2549 		*status = HCI_ERROR_UNSPECIFIED;
2550 		return;
2551 	}
2552 
2553 	if (*status)
2554 		return;
2555 
2556 	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2557 
2558 	/* When service discovery is used and the controller has a
2559 	 * strict duplicate filter, it is important to remember the
2560 	 * start and duration of the scan. This is required for
2561 	 * restarting scanning during the discovery phase.
2562 	 */
2563 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2564 		     hdev->discovery.result_filtering) {
2565 		hdev->discovery.scan_start = jiffies;
2566 		hdev->discovery.scan_duration = timeout;
2567 	}
2568 
2569 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2570 			   timeout);
2571 }
2572 
2573 bool hci_req_stop_discovery(struct hci_request *req)
2574 {
2575 	struct hci_dev *hdev = req->hdev;
2576 	struct discovery_state *d = &hdev->discovery;
2577 	struct hci_cp_remote_name_req_cancel cp;
2578 	struct inquiry_entry *e;
2579 	bool ret = false;
2580 
2581 	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2582 
2583 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2584 		if (test_bit(HCI_INQUIRY, &hdev->flags))
2585 			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2586 
2587 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2588 			cancel_delayed_work(&hdev->le_scan_disable);
2589 			hci_req_add_le_scan_disable(req);
2590 		}
2591 
2592 		ret = true;
2593 	} else {
2594 		/* Passive scanning */
2595 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2596 			hci_req_add_le_scan_disable(req);
2597 			ret = true;
2598 		}
2599 	}
2600 
2601 	/* No further actions needed for LE-only discovery */
2602 	if (d->type == DISCOV_TYPE_LE)
2603 		return ret;
2604 
2605 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2606 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2607 						     NAME_PENDING);
2608 		if (!e)
2609 			return ret;
2610 
2611 		bacpy(&cp.bdaddr, &e->data.bdaddr);
2612 		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2613 			    &cp);
2614 		ret = true;
2615 	}
2616 
2617 	return ret;
2618 }
2619 
2620 static int stop_discovery(struct hci_request *req, unsigned long opt)
2621 {
2622 	hci_dev_lock(req->hdev);
2623 	hci_req_stop_discovery(req);
2624 	hci_dev_unlock(req->hdev);
2625 
2626 	return 0;
2627 }
2628 
2629 static void discov_update(struct work_struct *work)
2630 {
2631 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2632 					    discov_update);
2633 	u8 status = 0;
2634 
2635 	switch (hdev->discovery.state) {
2636 	case DISCOVERY_STARTING:
2637 		start_discovery(hdev, &status);
2638 		mgmt_start_discovery_complete(hdev, status);
2639 		if (status)
2640 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2641 		else
2642 			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2643 		break;
2644 	case DISCOVERY_STOPPING:
2645 		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2646 		mgmt_stop_discovery_complete(hdev, status);
2647 		if (!status)
2648 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2649 		break;
2650 	case DISCOVERY_STOPPED:
2651 	default:
2652 		return;
2653 	}
2654 }
2655 
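/* Discoverable timeout handler: clear the discoverable flags and sync the
 * resulting settings to the controller.
 */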
2656 static void discov_off(struct work_struct *work)
2657 {
2658 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2659 					    discov_off.work);
2660 
2661 	BT_DBG("%s", hdev->name);
2662 
2663 	hci_dev_lock(hdev);
2664 
2665 	/* When discoverable timeout triggers, then just make sure
2666 	 * the limited discoverable flag is cleared. Even in the case
2667 	 * of a timeout triggered from general discoverable, it is
2668 	 * safe to unconditionally clear the flag.
2669 	 */
2670 	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2671 	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2672 	hdev->discov_timeout = 0;
2673 
2674 	hci_dev_unlock(hdev);
2675 
2676 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2677 	mgmt_new_settings(hdev);
2678 }
2679 
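/* Bring the controller in line with the current mgmt settings after power
 * on: SSP/SC support, LE host support, advertising data and instances,
 * link security, scan mode, class, name and EIR.
 */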
2680 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2681 {
2682 	struct hci_dev *hdev = req->hdev;
2683 	u8 link_sec;
2684 
2685 	hci_dev_lock(hdev);
2686 
2687 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2688 	    !lmp_host_ssp_capable(hdev)) {
2689 		u8 mode = 0x01;
2690 
2691 		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2692 
2693 		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2694 			u8 support = 0x01;
2695 
2696 			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2697 				    sizeof(support), &support);
2698 		}
2699 	}
2700 
2701 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2702 	    lmp_bredr_capable(hdev)) {
2703 		struct hci_cp_write_le_host_supported cp;
2704 
2705 		cp.le = 0x01;
2706 		cp.simul = 0x00;
2707 
2708 		/* Check first if we already have the right
2709 		 * host state (host features set)
2710 		 */
2711 		if (cp.le != lmp_host_le_capable(hdev) ||
2712 		    cp.simul != lmp_host_le_br_capable(hdev))
2713 			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2714 				    sizeof(cp), &cp);
2715 	}
2716 
2717 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2718 		/* Make sure the controller has a good default for
2719 		 * advertising data. This also applies to the case
2720 		 * where BR/EDR was toggled during the AUTO_OFF phase.
2721 		 */
2722 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2723 		    list_empty(&hdev->adv_instances)) {
2724 			int err;
2725 
2726 			if (ext_adv_capable(hdev)) {
2727 				err = __hci_req_setup_ext_adv_instance(req,
2728 								       0x00);
2729 				if (!err)
2730 					__hci_req_update_scan_rsp_data(req,
2731 								       0x00);
2732 			} else {
2733 				err = 0;
2734 				__hci_req_update_adv_data(req, 0x00);
2735 				__hci_req_update_scan_rsp_data(req, 0x00);
2736 			}
2737 
2738 			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2739 				if (!ext_adv_capable(hdev))
2740 					__hci_req_enable_advertising(req);
2741 				else if (!err)
2742 					__hci_req_enable_ext_advertising(req);
2743 			}
2744 		} else if (!list_empty(&hdev->adv_instances)) {
2745 			struct adv_info *adv_instance;
2746 
2747 			adv_instance = list_first_entry(&hdev->adv_instances,
2748 							struct adv_info, list);
2749 			__hci_req_schedule_adv_instance(req,
2750 							adv_instance->instance,
2751 							true);
2752 		}
2753 	}
2754 
2755 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2756 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2757 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2758 			    sizeof(link_sec), &link_sec);
2759 
2760 	if (lmp_bredr_capable(hdev)) {
2761 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2762 			__hci_req_write_fast_connectable(req, true);
2763 		else
2764 			__hci_req_write_fast_connectable(req, false);
2765 		__hci_req_update_scan(req);
2766 		__hci_req_update_class(req);
2767 		__hci_req_update_name(req);
2768 		__hci_req_update_eir(req);
2769 	}
2770 
2771 	hci_dev_unlock(hdev);
2772 	return 0;
2773 }
2774 
2775 int __hci_req_hci_power_on(struct hci_dev *hdev)
2776 {
2777 	/* Register the available SMP channels (BR/EDR and LE) only when
2778 	 * successfully powering on the controller. This late
2779 	 * registration is required so that LE SMP can clearly decide if
2780 	 * the public address or static address is used.
2781 	 */
2782 	smp_register(hdev);
2783 
2784 	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2785 			      NULL);
2786 }
2787 
2788 void hci_request_setup(struct hci_dev *hdev)
2789 {
2790 	INIT_WORK(&hdev->discov_update, discov_update);
2791 	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2792 	INIT_WORK(&hdev->scan_update, scan_update_work);
2793 	INIT_WORK(&hdev->connectable_update, connectable_update_work);
2794 	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2795 	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2796 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2797 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2798 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2799 }
2800 
2801 void hci_request_cancel_all(struct hci_dev *hdev)
2802 {
2803 	hci_req_sync_cancel(hdev, ENODEV);
2804 
2805 	cancel_work_sync(&hdev->discov_update);
2806 	cancel_work_sync(&hdev->bg_scan_update);
2807 	cancel_work_sync(&hdev->scan_update);
2808 	cancel_work_sync(&hdev->connectable_update);
2809 	cancel_work_sync(&hdev->discoverable_update);
2810 	cancel_delayed_work_sync(&hdev->discov_off);
2811 	cancel_delayed_work_sync(&hdev->le_scan_disable);
2812 	cancel_delayed_work_sync(&hdev->le_scan_restart);
2813 
2814 	if (hdev->adv_instance_timeout) {
2815 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
2816 		hdev->adv_instance_timeout = 0;
2817 	}
2818 }
2819