/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}
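
/* A typical asynchronous use of the request API above: initialize a
 * request on the stack, queue one or more commands on it and submit it
 * with a completion callback. This is an illustrative sketch only; the
 * names hci_example_complete()/hci_example_write_scan() and the choice
 * of command are hypothetical.
 *
 *	static void hci_example_complete(struct hci_dev *hdev, u8 status,
 *					 u16 opcode)
 *	{
 *		bt_dev_dbg(hdev, "opcode 0x%4.4x status 0x%2.2x",
 *			   opcode, status);
 *	}
 *
 *	static int hci_example_write_scan(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *		u8 scan = SCAN_PAGE;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return hci_req_run(&req, hci_example_complete);
 *	}
 */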

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
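
/* Illustrative synchronous usage (a sketch; the opcode choice is just
 * an example). The caller serializes against other requests with
 * hci_req_sync_lock() and owns the returned skb on success:
 *
 *	struct sk_buff *skb;
 *
 *	hci_req_sync_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	hci_req_sync_unlock(hdev);
 *
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... parse skb->data, e.g. as struct hci_rp_read_local_version ...
 *	kfree_skb(skb);
 */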

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
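
/* The builder callback given to hci_req_sync() queues commands on the
 * request and may return a negative errno to abort. A minimal sketch
 * (the function name and the use of opt to carry the scan mode are
 * hypothetical):
 *
 *	static int write_scan_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, write_scan_req, SCAN_PAGE, HCI_CMD_TIMEOUT,
 *			   &status);
 */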

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
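
/* For reference, the command packet assembled above uses the standard
 * HCI layout: a 2-byte little-endian opcode, a 1-byte parameter length
 * and then plen bytes of parameters. Write Scan Enable (opcode 0x0c1a)
 * with the single parameter 0x02 (SCAN_PAGE), for example, serializes
 * to the four bytes 1a 0c 01 02.
 */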

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval (0x0100 * 0.625 ms) */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	BT_DBG("%s ADV monitoring is %s", hdev->name,
	       hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices to
		 * be scanned for and no ADV monitors, we should stop the
		 * background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

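/* Each EIR field generated below uses the standard length/type/data
 * layout: one length byte that covers the type byte plus the data, one
 * type byte, then the data itself. A complete 16-bit UUID list holding
 * only the Heart Rate service (0x180d), for example, is encoded as
 * 03 03 0d 18: length 3, EIR_UUID16_ALL (0x03), then the UUID in
 * little-endian order.
 */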
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
				u8 bdaddr_type)
{
	struct hci_cp_le_del_from_white_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list cannot be used with RPAs */
	if (!allow_rpa && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in the whitelist */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst case,
	 * we won't be able to wake from devices that use the Privacy 1.2
	 * features. Additionally, once we support Privacy 1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list cannot be used with RPAs */
		if (!allow_rpa && !use_ll_privacy(hdev) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return the filter policy value to not use
	 * the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Once controller offloading of advertisement monitors is in place,
	 * this condition should also check for MSFT extension support.
	 * If suspend is ongoing, the whitelist should be the default to
	 * prevent waking by random advertisements.
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
		return 0x00;

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

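/* The interval and window arguments below are in units of 0.625 ms as
 * defined for the LE scan parameters: for example, an interval of
 * 0x0010 is 10 ms and a window of 0x0012 is 11.25 ms. The window must
 * not exceed the interval.
 */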
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use extended scanning if both the extended scan parameters and
	 * extended scan enable commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution so that the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQs are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using directed
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using resolvable random addresses and
	 * has LE privacy enabled, controllers that support Extended
	 * Scanner Filter Policies can also handle directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, addr_resolv);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	memset(&f, 0, sizeof(f));
	f.flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

	/* Update page scan state (since we may have modified it when setting
	 * the event filter).
	 */
	__hci_req_update_scan(req);
}

static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->whitelist, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_req_config_le_suspend_scan(struct hci_request *req)
{
	/* Before changing params disable scan if enabled */
	if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);

	/* Configure params and enable scanning */
	hci_req_add_le_passive_scan(req);

	/* Block suspend notifier on response */
	set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void hci_suspend_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Suspending advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void hci_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}

	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop directed advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		/* Pause other advertisements */
		if (hdev->adv_instance_cnt)
			hci_suspend_adv_instances(&req);

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;
		/* Disable page scan */
		page_scan = SCAN_DISABLED;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(&req, false);

		/* Mark task needing completion */
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		hci_req_config_le_suspend_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		hci_req_clear_event_filter(&req);
		/* Reset passive/background scanning to normal */
		hci_req_config_le_suspend_scan(&req);

		/* Unpause directed advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Resume other advertisements */
		if (hdev->adv_instance_cnt)
			hci_resume_adv_instances(&req);

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);

	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when given an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (connectable) {
		cp.type = LE_ADV_IND;

		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	} else {
		if (get_cur_adv_instance_scan_rsp_len(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		} else {
			adv_min_interval = hdev->le_adv_min_interval;
			adv_max_interval = hdev->le_adv_max_interval;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		/* Extended scan response data doesn't allow a response to be
		 * set if the instance isn't scannable.
		 */
		if (get_adv_instance_scan_rsp_len(hdev, instance))
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = 0;

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = instance;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* If instance already has the flags set skip adding it once
	 * again.
	 */
	if (adv_instance && eir_get_data(adv_instance->adv_data,
					 adv_instance->adv_data_len, EIR_FLAGS,
					 NULL))
		goto skip_flags;

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

skip_flags:
	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}
1729 
__hci_req_update_adv_data(struct hci_request * req,u8 instance)1730 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1731 {
1732 	struct hci_dev *hdev = req->hdev;
1733 	u8 len;
1734 
1735 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1736 		return;
1737 
1738 	if (ext_adv_capable(hdev)) {
1739 		struct hci_cp_le_set_ext_adv_data cp;
1740 
1741 		memset(&cp, 0, sizeof(cp));
1742 
1743 		len = create_instance_adv_data(hdev, instance, cp.data);
1744 
1745 		/* There's nothing to do if the data hasn't changed */
1746 		if (hdev->adv_data_len == len &&
1747 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1748 			return;
1749 
1750 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1751 		hdev->adv_data_len = len;
1752 
1753 		cp.length = len;
1754 		cp.handle = instance;
1755 		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1756 		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1757 
1758 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1759 	} else {
1760 		struct hci_cp_le_set_adv_data cp;
1761 
1762 		memset(&cp, 0, sizeof(cp));
1763 
1764 		len = create_instance_adv_data(hdev, instance, cp.data);
1765 
1766 		/* There's nothing to do if the data hasn't changed */
1767 		if (hdev->adv_data_len == len &&
1768 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1769 			return;
1770 
1771 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1772 		hdev->adv_data_len = len;
1773 
1774 		cp.length = len;
1775 
1776 		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1777 	}
1778 }
1779 
1780 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1781 {
1782 	struct hci_request req;
1783 
1784 	hci_req_init(&req, hdev);
1785 	__hci_req_update_adv_data(&req, instance);
1786 
1787 	return hci_req_run(&req, NULL);
1788 }
1789 
1790 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1791 					    u16 opcode)
1792 {
1793 	BT_DBG("%s status %u", hdev->name, status);
1794 }
1795 
1796 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1797 {
1798 	struct hci_request req;
1799 	__u8 enable = 0x00;
1800 
1801 	if (!use_ll_privacy(hdev) &&
1802 	    !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1803 		return;
1804 
1805 	hci_req_init(&req, hdev);
1806 
1807 	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1808 
1809 	hci_req_run(&req, enable_addr_resolution_complete);
1810 }
1811 
1812 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1813 {
1814 	BT_DBG("%s status %u", hdev->name, status);
1815 }
1816 
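/* Re-enable advertising after it was turned off, for example when a
 * connection was established while an instance was active. Either the
 * current instance is rescheduled or, when none is selected, the
 * default instance 0x00 is started.
 */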
1817 void hci_req_reenable_advertising(struct hci_dev *hdev)
1818 {
1819 	struct hci_request req;
1820 
1821 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1822 	    list_empty(&hdev->adv_instances))
1823 		return;
1824 
1825 	hci_req_init(&req, hdev);
1826 
1827 	if (hdev->cur_adv_instance) {
1828 		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1829 						true);
1830 	} else {
1831 		if (ext_adv_capable(hdev)) {
1832 			__hci_req_start_ext_adv(&req, 0x00);
1833 		} else {
1834 			__hci_req_update_adv_data(&req, 0x00);
1835 			__hci_req_update_scan_rsp_data(&req, 0x00);
1836 			__hci_req_enable_advertising(&req);
1837 		}
1838 	}
1839 
1840 	hci_req_run(&req, adv_enable_complete);
1841 }
1842 
1843 static void adv_timeout_expire(struct work_struct *work)
1844 {
1845 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1846 					    adv_instance_expire.work);
1847 
1848 	struct hci_request req;
1849 	u8 instance;
1850 
1851 	BT_DBG("%s", hdev->name);
1852 
1853 	hci_dev_lock(hdev);
1854 
1855 	hdev->adv_instance_timeout = 0;
1856 
1857 	instance = hdev->cur_adv_instance;
1858 	if (instance == 0x00)
1859 		goto unlock;
1860 
1861 	hci_req_init(&req, hdev);
1862 
1863 	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1864 
1865 	if (list_empty(&hdev->adv_instances))
1866 		__hci_req_disable_advertising(&req);
1867 
1868 	hci_req_run(&req, NULL);
1869 
1870 unlock:
1871 	hci_dev_unlock(hdev);
1872 }
1873 
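/* Pick the own address type and, when needed, a fresh random address
 * for advertising. The order of preference is: a resolvable private
 * address when use_rpa is set, a non-resolvable private address when
 * privacy is required for non-connectable advertising, and the public
 * address otherwise. A minimal usage sketch (mirroring the call in
 * __hci_req_setup_ext_adv_instance() below):
 *
 *	err = hci_get_random_address(hdev, !connectable,
 *				     adv_use_rpa(hdev, flags), adv_instance,
 *				     &own_addr_type, &random_addr);
 *	if (err < 0)
 *		return err;
 */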
1874 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1875 			   bool use_rpa, struct adv_info *adv_instance,
1876 			   u8 *own_addr_type, bdaddr_t *rand_addr)
1877 {
1878 	int err;
1879 
1880 	bacpy(rand_addr, BDADDR_ANY);
1881 
1882 	/* If privacy is enabled, use a resolvable private address. If the
1883 	 * current RPA has expired, then generate a new one.
1884 	 */
1885 	if (use_rpa) {
1886 		int to;
1887 
1888 		/* If the controller supports LL Privacy, use own address
1889 		 * type 0x03 (RPA resolved by the controller).
1890 		 */
1891 		if (use_ll_privacy(hdev))
1892 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1893 		else
1894 			*own_addr_type = ADDR_LE_DEV_RANDOM;
1895 
1896 		if (adv_instance) {
1897 			if (!adv_instance->rpa_expired &&
1898 			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
1899 				return 0;
1900 
1901 			adv_instance->rpa_expired = false;
1902 		} else {
1903 			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1904 			    !bacmp(&hdev->random_addr, &hdev->rpa))
1905 				return 0;
1906 		}
1907 
1908 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1909 		if (err < 0) {
1910 			bt_dev_err(hdev, "failed to generate new RPA");
1911 			return err;
1912 		}
1913 
1914 		bacpy(rand_addr, &hdev->rpa);
1915 
1916 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1917 		if (adv_instance)
1918 			queue_delayed_work(hdev->workqueue,
1919 					   &adv_instance->rpa_expired_cb, to);
1920 		else
1921 			queue_delayed_work(hdev->workqueue,
1922 					   &hdev->rpa_expired, to);
1923 
1924 		return 0;
1925 	}
1926 
1927 	/* In case of required privacy without resolvable private address,
1928 	 * use a non-resolvable private address. This is useful for
1929 	 * non-connectable advertising.
1930 	 */
1931 	if (require_privacy) {
1932 		bdaddr_t nrpa;
1933 
1934 		while (true) {
1935 			/* The non-resolvable private address is generated
1936 			 * from six random bytes with the two most significant
1937 			 * bits cleared.
1938 			 */
1939 			get_random_bytes(&nrpa, 6);
1940 			nrpa.b[5] &= 0x3f;
1941 
1942 			/* The non-resolvable private address shall not be
1943 			 * equal to the public address.
1944 			 */
1945 			if (bacmp(&hdev->bdaddr, &nrpa))
1946 				break;
1947 		}
1948 
1949 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1950 		bacpy(rand_addr, &nrpa);
1951 
1952 		return 0;
1953 	}
1954 
1955 	/* No privacy so use a public address. */
1956 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1957 
1958 	return 0;
1959 }
1960 
1961 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1962 {
1963 	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1964 }
1965 
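/* Queue the LE Set Extended Advertising Parameters command for the
 * given instance and, when a new random address is required, the
 * matching LE Set Advertising Set Random Address command.
 */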
1966 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1967 {
1968 	struct hci_cp_le_set_ext_adv_params cp;
1969 	struct hci_dev *hdev = req->hdev;
1970 	bool connectable;
1971 	u32 flags;
1972 	bdaddr_t random_addr;
1973 	u8 own_addr_type;
1974 	int err;
1975 	struct adv_info *adv_instance;
1976 	bool secondary_adv;
1977 
1978 	if (instance > 0) {
1979 		adv_instance = hci_find_adv_instance(hdev, instance);
1980 		if (!adv_instance)
1981 			return -EINVAL;
1982 	} else {
1983 		adv_instance = NULL;
1984 	}
1985 
1986 	flags = get_adv_instance_flags(hdev, instance);
1987 
1988 	/* If the "connectable" instance flag was not set, then choose between
1989 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1990 	 */
1991 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1992 		      mgmt_get_connectable(hdev);
1993 
1994 	if (!is_advertising_allowed(hdev, connectable))
1995 		return -EPERM;
1996 
1997 	/* Set require_privacy to true only when non-connectable
1998 	 * advertising is used. In that case it is fine to use a
1999 	 * non-resolvable private address.
2000 	 */
2001 	err = hci_get_random_address(hdev, !connectable,
2002 				     adv_use_rpa(hdev, flags), adv_instance,
2003 				     &own_addr_type, &random_addr);
2004 	if (err < 0)
2005 		return err;
2006 
2007 	memset(&cp, 0, sizeof(cp));
2008 
2009 	/* In the ext adv set param command the interval is 3 octets */
2010 	hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2011 	hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2012 
2013 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2014 
2015 	if (connectable) {
2016 		if (secondary_adv)
2017 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2018 		else
2019 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2020 	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
2021 		if (secondary_adv)
2022 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2023 		else
2024 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2025 	} else {
2026 		if (secondary_adv)
2027 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2028 		else
2029 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2030 	}
2031 
2032 	cp.own_addr_type = own_addr_type;
2033 	cp.channel_map = hdev->le_adv_channel_map;
2034 	cp.tx_power = 127;
2035 	cp.handle = instance;
2036 
2037 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
2038 		cp.primary_phy = HCI_ADV_PHY_1M;
2039 		cp.secondary_phy = HCI_ADV_PHY_2M;
2040 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2041 		cp.primary_phy = HCI_ADV_PHY_CODED;
2042 		cp.secondary_phy = HCI_ADV_PHY_CODED;
2043 	} else {
2044 		/* In all other cases use 1M */
2045 		cp.primary_phy = HCI_ADV_PHY_1M;
2046 		cp.secondary_phy = HCI_ADV_PHY_1M;
2047 	}
2048 
2049 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2050 
2051 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2052 	    bacmp(&random_addr, BDADDR_ANY)) {
2053 		struct hci_cp_le_set_adv_set_rand_addr cp;
2054 
2055 		/* Check if the random address needs to be updated */
2056 		if (adv_instance) {
2057 			if (!bacmp(&random_addr, &adv_instance->random_addr))
2058 				return 0;
2059 		} else {
2060 			if (!bacmp(&random_addr, &hdev->random_addr))
2061 				return 0;
2062 		}
2063 
2064 		memset(&cp, 0, sizeof(cp));
2065 
2066 		cp.handle = instance;
2067 		bacpy(&cp.bdaddr, &random_addr);
2068 
2069 		hci_req_add(req,
2070 			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2071 			    sizeof(cp), &cp);
2072 	}
2073 
2074 	return 0;
2075 }
2076 
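/* Queue the LE Set Extended Advertising Enable command for a single
 * advertising set. The duration is handed to the controller in units
 * of 10 ms so that it can schedule the instance on its own.
 */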
2077 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2078 {
2079 	struct hci_dev *hdev = req->hdev;
2080 	struct hci_cp_le_set_ext_adv_enable *cp;
2081 	struct hci_cp_ext_adv_set *adv_set;
2082 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2083 	struct adv_info *adv_instance;
2084 
2085 	if (instance > 0) {
2086 		adv_instance = hci_find_adv_instance(hdev, instance);
2087 		if (!adv_instance)
2088 			return -EINVAL;
2089 	} else {
2090 		adv_instance = NULL;
2091 	}
2092 
2093 	cp = (void *) data;
2094 	adv_set = (void *) cp->data;
2095 
2096 	memset(cp, 0, sizeof(*cp));
2097 
2098 	cp->enable = 0x01;
2099 	cp->num_of_sets = 0x01;
2100 
2101 	memset(adv_set, 0, sizeof(*adv_set));
2102 
2103 	adv_set->handle = instance;
2104 
2105 	/* Set the duration per instance since the controller is
2106 	 * responsible for scheduling it.
2107 	 */
2108 	if (adv_instance && adv_instance->duration) {
2109 		u16 duration = adv_instance->duration * MSEC_PER_SEC;
2110 
2111 		/* Time = N * 10 ms */
2112 		adv_set->duration = cpu_to_le16(duration / 10);
2113 	}
2114 
2115 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2116 		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2117 		    data);
2118 
2119 	return 0;
2120 }
2121 
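/* Queue the command that disables one extended advertising instance,
 * or all of them when instance is 0x00 (num_of_sets ends up as zero).
 */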
2122 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2123 {
2124 	struct hci_dev *hdev = req->hdev;
2125 	struct hci_cp_le_set_ext_adv_enable *cp;
2126 	struct hci_cp_ext_adv_set *adv_set;
2127 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2128 	u8 req_size;
2129 
2130 	/* If request specifies an instance that doesn't exist, fail */
2131 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2132 		return -EINVAL;
2133 
2134 	memset(data, 0, sizeof(data));
2135 
2136 	cp = (void *)data;
2137 	adv_set = (void *)cp->data;
2138 
2139 	/* Instance 0x00 indicates all advertising instances will be disabled */
2140 	cp->num_of_sets = !!instance;
2141 	cp->enable = 0x00;
2142 
2143 	adv_set->handle = instance;
2144 
2145 	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2146 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2147 
2148 	return 0;
2149 }
2150 
2151 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2152 {
2153 	struct hci_dev *hdev = req->hdev;
2154 
2155 	/* If request specifies an instance that doesn't exist, fail */
2156 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2157 		return -EINVAL;
2158 
2159 	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2160 
2161 	return 0;
2162 }
2163 
2164 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2165 {
2166 	struct hci_dev *hdev = req->hdev;
2167 	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2168 	int err;
2169 
2170 	/* If the instance isn't pending, the controller already knows about
2171 	 * it and it is safe to disable it first.
2172 	 */
2173 	if (adv_instance && !adv_instance->pending)
2174 		__hci_req_disable_ext_adv_instance(req, instance);
2175 
2176 	err = __hci_req_setup_ext_adv_instance(req, instance);
2177 	if (err < 0)
2178 		return err;
2179 
2180 	__hci_req_update_scan_rsp_data(req, instance);
2181 	__hci_req_enable_ext_advertising(req, instance);
2182 
2183 	return 0;
2184 }
2185 
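/* Make the given instance the current one and queue the commands
 * needed to advertise it. With legacy advertising the expiry is driven
 * by the adv_instance_expire delayed work; with extended advertising
 * the controller handles the duration itself.
 */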
2186 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2187 				    bool force)
2188 {
2189 	struct hci_dev *hdev = req->hdev;
2190 	struct adv_info *adv_instance = NULL;
2191 	u16 timeout;
2192 
2193 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2194 	    list_empty(&hdev->adv_instances))
2195 		return -EPERM;
2196 
2197 	if (hdev->adv_instance_timeout)
2198 		return -EBUSY;
2199 
2200 	adv_instance = hci_find_adv_instance(hdev, instance);
2201 	if (!adv_instance)
2202 		return -ENOENT;
2203 
2204 	/* A zero timeout means unlimited advertising. As long as there is
2205 	 * only one instance, duration should be ignored. We still set a timeout
2206 	 * in case further instances are being added later on.
2207 	 *
2208 	 * If the remaining lifetime of the instance is more than the duration
2209 	 * then the timeout corresponds to the duration, otherwise it will be
2210 	 * reduced to the remaining instance lifetime.
2211 	 */
2212 	if (adv_instance->timeout == 0 ||
2213 	    adv_instance->duration <= adv_instance->remaining_time)
2214 		timeout = adv_instance->duration;
2215 	else
2216 		timeout = adv_instance->remaining_time;
2217 
2218 	/* The remaining time is being reduced unless the instance is being
2219 	 * advertised without time limit.
2220 	 */
2221 	if (adv_instance->timeout)
2222 		adv_instance->remaining_time =
2223 				adv_instance->remaining_time - timeout;
2224 
2225 	/* Only use work for scheduling instances with legacy advertising */
2226 	if (!ext_adv_capable(hdev)) {
2227 		hdev->adv_instance_timeout = timeout;
2228 		queue_delayed_work(hdev->req_workqueue,
2229 				   &hdev->adv_instance_expire,
2230 				   msecs_to_jiffies(timeout * 1000));
2231 	}
2232 
2233 	/* If we're just re-scheduling the same instance again then do not
2234 	 * execute any HCI commands. This happens when a single instance is
2235 	 * being advertised.
2236 	 */
2237 	if (!force && hdev->cur_adv_instance == instance &&
2238 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
2239 		return 0;
2240 
2241 	hdev->cur_adv_instance = instance;
2242 	if (ext_adv_capable(hdev)) {
2243 		__hci_req_start_ext_adv(req, instance);
2244 	} else {
2245 		__hci_req_update_adv_data(req, instance);
2246 		__hci_req_update_scan_rsp_data(req, instance);
2247 		__hci_req_enable_advertising(req);
2248 	}
2249 
2250 	return 0;
2251 }
2252 
2253 /* For a single instance:
2254  * - force == true: The instance will be removed even when its remaining
2255  *   lifetime is not zero.
2256  * - force == false: The instance will be deactivated but kept stored unless
2257  *   the remaining lifetime is zero.
2258  *
2259  * For instance == 0x00:
2260  * - force == true: All instances will be removed regardless of their timeout
2261  *   setting.
2262  * - force == false: Only instances that have a timeout will be removed.
2263  */
2264 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2265 				struct hci_request *req, u8 instance,
2266 				bool force)
2267 {
2268 	struct adv_info *adv_instance, *n, *next_instance = NULL;
2269 	int err;
2270 	u8 rem_inst;
2271 
2272 	/* Cancel any timeout concerning the removed instance(s). */
2273 	if (!instance || hdev->cur_adv_instance == instance)
2274 		cancel_adv_timeout(hdev);
2275 
2276 	/* Get the next instance to advertise BEFORE we remove
2277 	 * the current one. This can be the same instance again
2278 	 * if there is only one instance.
2279 	 */
2280 	if (instance && hdev->cur_adv_instance == instance)
2281 		next_instance = hci_get_next_instance(hdev, instance);
2282 
2283 	if (instance == 0x00) {
2284 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2285 					 list) {
2286 			if (!(force || adv_instance->timeout))
2287 				continue;
2288 
2289 			rem_inst = adv_instance->instance;
2290 			err = hci_remove_adv_instance(hdev, rem_inst);
2291 			if (!err)
2292 				mgmt_advertising_removed(sk, hdev, rem_inst);
2293 		}
2294 	} else {
2295 		adv_instance = hci_find_adv_instance(hdev, instance);
2296 
2297 		if (force || (adv_instance && adv_instance->timeout &&
2298 			      !adv_instance->remaining_time)) {
2299 			/* Don't advertise a removed instance. */
2300 			if (next_instance &&
2301 			    next_instance->instance == instance)
2302 				next_instance = NULL;
2303 
2304 			err = hci_remove_adv_instance(hdev, instance);
2305 			if (!err)
2306 				mgmt_advertising_removed(sk, hdev, instance);
2307 		}
2308 	}
2309 
2310 	if (!req || !hdev_is_powered(hdev) ||
2311 	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
2312 		return;
2313 
2314 	if (next_instance && !ext_adv_capable(hdev))
2315 		__hci_req_schedule_adv_instance(req, next_instance->instance,
2316 						false);
2317 }
2318 
2319 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2320 {
2321 	struct hci_dev *hdev = req->hdev;
2322 
2323 	/* If we're advertising or initiating an LE connection we can't
2324 	 * go ahead and change the random address at this time. This is
2325 	 * because the eventual initiator address used for the
2326 	 * subsequently created connection will be undefined (some
2327 	 * controllers use the new address and others the one we had
2328 	 * when the operation started).
2329 	 *
2330 	 * In this kind of scenario skip the update and let the random
2331 	 * address be updated at the next cycle.
2332 	 */
2333 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2334 	    hci_lookup_le_connect(hdev)) {
2335 		BT_DBG("Deferring random address update");
2336 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2337 		return;
2338 	}
2339 
2340 	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2341 }
2342 
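/* Decide which own address type to use for scanning or connecting and
 * queue an address update when necessary. The order of precedence is:
 * resolvable private address, non-resolvable private address, static
 * address and finally the public address.
 */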
2343 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2344 			      bool use_rpa, u8 *own_addr_type)
2345 {
2346 	struct hci_dev *hdev = req->hdev;
2347 	int err;
2348 
2349 	/* If privacy is enabled, use a resolvable private address. If the
2350 	 * current RPA has expired or something other than the current
2351 	 * RPA is in use, then generate a new one.
2352 	 */
2353 	if (use_rpa) {
2354 		int to;
2355 
2356 		/* If the controller supports LL Privacy, use own address
2357 		 * type 0x03 (RPA resolved by the controller).
2358 		 */
2359 		if (use_ll_privacy(hdev))
2360 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2361 		else
2362 			*own_addr_type = ADDR_LE_DEV_RANDOM;
2363 
2364 		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2365 		    !bacmp(&hdev->random_addr, &hdev->rpa))
2366 			return 0;
2367 
2368 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2369 		if (err < 0) {
2370 			bt_dev_err(hdev, "failed to generate new RPA");
2371 			return err;
2372 		}
2373 
2374 		set_random_addr(req, &hdev->rpa);
2375 
2376 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2377 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2378 
2379 		return 0;
2380 	}
2381 
2382 	/* In case of required privacy without resolvable private address,
2383 	 * use a non-resolvable private address. This is useful for active
2384 	 * scanning and non-connectable advertising.
2385 	 */
2386 	if (require_privacy) {
2387 		bdaddr_t nrpa;
2388 
2389 		while (true) {
2390 			/* The non-resolvable private address is generated
2391 			 * from six random bytes with the two most significant
2392 			 * bits cleared.
2393 			 */
2394 			get_random_bytes(&nrpa, 6);
2395 			nrpa.b[5] &= 0x3f;
2396 
2397 			/* The non-resolvable private address shall not be
2398 			 * equal to the public address.
2399 			 */
2400 			if (bacmp(&hdev->bdaddr, &nrpa))
2401 				break;
2402 		}
2403 
2404 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2405 		set_random_addr(req, &nrpa);
2406 		return 0;
2407 	}
2408 
2409 	/* If forcing static address is in use or there is no public
2410 	 * address use the static address as random address (but skip
2411 	 * the HCI command if the current random address is already the
2412 	 * static one).
2413 	 *
2414 	 * In case BR/EDR has been disabled on a dual-mode controller
2415 	 * and a static address has been configured, then use that
2416 	 * address instead of the public BR/EDR address.
2417 	 */
2418 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2419 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2420 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2421 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2422 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2423 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
2424 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2425 				    &hdev->static_addr);
2426 		return 0;
2427 	}
2428 
2429 	/* Neither privacy nor static address is being used so use a
2430 	 * public address.
2431 	 */
2432 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
2433 
2434 	return 0;
2435 }
2436 
2437 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2438 {
2439 	struct bdaddr_list *b;
2440 
2441 	list_for_each_entry(b, &hdev->whitelist, list) {
2442 		struct hci_conn *conn;
2443 
2444 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2445 		if (!conn)
2446 			return true;
2447 
2448 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2449 			return true;
2450 	}
2451 
2452 	return false;
2453 }
2454 
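/* Update the BR/EDR page and inquiry scan settings based on the
 * current connectable and discoverable state, skipping the Write Scan
 * Enable command when nothing would change.
 */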
2455 void __hci_req_update_scan(struct hci_request *req)
2456 {
2457 	struct hci_dev *hdev = req->hdev;
2458 	u8 scan;
2459 
2460 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2461 		return;
2462 
2463 	if (!hdev_is_powered(hdev))
2464 		return;
2465 
2466 	if (mgmt_powering_down(hdev))
2467 		return;
2468 
2469 	if (hdev->scanning_paused)
2470 		return;
2471 
2472 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2473 	    disconnected_whitelist_entries(hdev))
2474 		scan = SCAN_PAGE;
2475 	else
2476 		scan = SCAN_DISABLED;
2477 
2478 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2479 		scan |= SCAN_INQUIRY;
2480 
2481 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2482 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2483 		return;
2484 
2485 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2486 }
2487 
2488 static int update_scan(struct hci_request *req, unsigned long opt)
2489 {
2490 	hci_dev_lock(req->hdev);
2491 	__hci_req_update_scan(req);
2492 	hci_dev_unlock(req->hdev);
2493 	return 0;
2494 }
2495 
2496 static void scan_update_work(struct work_struct *work)
2497 {
2498 	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2499 
2500 	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2501 }
2502 
2503 static int connectable_update(struct hci_request *req, unsigned long opt)
2504 {
2505 	struct hci_dev *hdev = req->hdev;
2506 
2507 	hci_dev_lock(hdev);
2508 
2509 	__hci_req_update_scan(req);
2510 
2511 	/* If BR/EDR is not enabled and we disable advertising as a
2512 	 * by-product of disabling connectable, we need to update the
2513 	 * advertising flags.
2514 	 */
2515 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2516 		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
2517 
2518 	/* Update the advertising parameters if necessary */
2519 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2520 	    !list_empty(&hdev->adv_instances)) {
2521 		if (ext_adv_capable(hdev))
2522 			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2523 		else
2524 			__hci_req_enable_advertising(req);
2525 	}
2526 
2527 	__hci_update_background_scan(req);
2528 
2529 	hci_dev_unlock(hdev);
2530 
2531 	return 0;
2532 }
2533 
2534 static void connectable_update_work(struct work_struct *work)
2535 {
2536 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2537 					    connectable_update);
2538 	u8 status;
2539 
2540 	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2541 	mgmt_set_connectable_complete(hdev, status);
2542 }
2543 
2544 static u8 get_service_classes(struct hci_dev *hdev)
2545 {
2546 	struct bt_uuid *uuid;
2547 	u8 val = 0;
2548 
2549 	list_for_each_entry(uuid, &hdev->uuids, list)
2550 		val |= uuid->svc_hint;
2551 
2552 	return val;
2553 }
2554 
2555 void __hci_req_update_class(struct hci_request *req)
2556 {
2557 	struct hci_dev *hdev = req->hdev;
2558 	u8 cod[3];
2559 
2560 	BT_DBG("%s", hdev->name);
2561 
2562 	if (!hdev_is_powered(hdev))
2563 		return;
2564 
2565 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2566 		return;
2567 
2568 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2569 		return;
2570 
2571 	cod[0] = hdev->minor_class;
2572 	cod[1] = hdev->major_class;
2573 	cod[2] = get_service_classes(hdev);
2574 
2575 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2576 		cod[1] |= 0x20;
2577 
2578 	if (memcmp(cod, hdev->dev_class, 3) == 0)
2579 		return;
2580 
2581 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2582 }
2583 
2584 static void write_iac(struct hci_request *req)
2585 {
2586 	struct hci_dev *hdev = req->hdev;
2587 	struct hci_cp_write_current_iac_lap cp;
2588 
2589 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2590 		return;
2591 
2592 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2593 		/* Limited discoverable mode */
2594 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
2595 		cp.iac_lap[0] = 0x00;	/* LIAC */
2596 		cp.iac_lap[1] = 0x8b;
2597 		cp.iac_lap[2] = 0x9e;
2598 		cp.iac_lap[3] = 0x33;	/* GIAC */
2599 		cp.iac_lap[4] = 0x8b;
2600 		cp.iac_lap[5] = 0x9e;
2601 	} else {
2602 		/* General discoverable mode */
2603 		cp.num_iac = 1;
2604 		cp.iac_lap[0] = 0x33;	/* GIAC */
2605 		cp.iac_lap[1] = 0x8b;
2606 		cp.iac_lap[2] = 0x9e;
2607 	}
2608 
2609 	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2610 		    (cp.num_iac * 3) + 1, &cp);
2611 }
2612 
2613 static int discoverable_update(struct hci_request *req, unsigned long opt)
2614 {
2615 	struct hci_dev *hdev = req->hdev;
2616 
2617 	hci_dev_lock(hdev);
2618 
2619 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2620 		write_iac(req);
2621 		__hci_req_update_scan(req);
2622 		__hci_req_update_class(req);
2623 	}
2624 
2625 	/* Advertising instances don't use the global discoverable setting, so
2626 	 * only update AD if advertising was enabled using Set Advertising.
2627 	 */
2628 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2629 		__hci_req_update_adv_data(req, 0x00);
2630 
2631 		/* Discoverable mode affects the local advertising
2632 		 * address in limited privacy mode.
2633 		 */
2634 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2635 			if (ext_adv_capable(hdev))
2636 				__hci_req_start_ext_adv(req, 0x00);
2637 			else
2638 				__hci_req_enable_advertising(req);
2639 		}
2640 	}
2641 
2642 	hci_dev_unlock(hdev);
2643 
2644 	return 0;
2645 }
2646 
2647 static void discoverable_update_work(struct work_struct *work)
2648 {
2649 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2650 					    discoverable_update);
2651 	u8 status;
2652 
2653 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2654 	mgmt_set_discoverable_complete(hdev, status);
2655 }
2656 
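/* Queue the command that terminates the given connection, chosen based
 * on the connection state: Disconnect for established links, a create
 * connection cancel while connecting, and a reject while an incoming
 * connection request is pending.
 */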
2657 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2658 		      u8 reason)
2659 {
2660 	switch (conn->state) {
2661 	case BT_CONNECTED:
2662 	case BT_CONFIG:
2663 		if (conn->type == AMP_LINK) {
2664 			struct hci_cp_disconn_phy_link cp;
2665 
2666 			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2667 			cp.reason = reason;
2668 			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2669 				    &cp);
2670 		} else {
2671 			struct hci_cp_disconnect dc;
2672 
2673 			dc.handle = cpu_to_le16(conn->handle);
2674 			dc.reason = reason;
2675 			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2676 		}
2677 
2678 		conn->state = BT_DISCONN;
2679 
2680 		break;
2681 	case BT_CONNECT:
2682 		if (conn->type == LE_LINK) {
2683 			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2684 				break;
2685 			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2686 				    0, NULL);
2687 		} else if (conn->type == ACL_LINK) {
2688 			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2689 				break;
2690 			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2691 				    6, &conn->dst);
2692 		}
2693 		break;
2694 	case BT_CONNECT2:
2695 		if (conn->type == ACL_LINK) {
2696 			struct hci_cp_reject_conn_req rej;
2697 
2698 			bacpy(&rej.bdaddr, &conn->dst);
2699 			rej.reason = reason;
2700 
2701 			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2702 				    sizeof(rej), &rej);
2703 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2704 			struct hci_cp_reject_sync_conn_req rej;
2705 
2706 			bacpy(&rej.bdaddr, &conn->dst);
2707 
2708 			/* SCO rejection has its own limited set of
2709 			 * allowed error values (0x0D-0x0F) which isn't
2710 			 * compatible with most values passed to this
2711 			 * function. To be safe hard-code one of the
2712 			 * values that's suitable for SCO.
2713 			 */
2714 			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2715 
2716 			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2717 				    sizeof(rej), &rej);
2718 		}
2719 		break;
2720 	default:
2721 		conn->state = BT_CLOSED;
2722 		break;
2723 	}
2724 }
2725 
2726 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2727 {
2728 	if (status)
2729 		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2730 }
2731 
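/* Convenience wrapper that builds and runs a one-off request aborting
 * the given connection. -ENODATA from hci_req_run() means that no
 * command was queued (e.g. an LE connection that was still only being
 * scanned for) and is deliberately not treated as a failure.
 */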
2732 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2733 {
2734 	struct hci_request req;
2735 	int err;
2736 
2737 	hci_req_init(&req, conn->hdev);
2738 
2739 	__hci_abort_conn(&req, conn, reason);
2740 
2741 	err = hci_req_run(&req, abort_conn_complete);
2742 	if (err && err != -ENODATA) {
2743 		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2744 		return err;
2745 	}
2746 
2747 	return 0;
2748 }
2749 
2750 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2751 {
2752 	hci_dev_lock(req->hdev);
2753 	__hci_update_background_scan(req);
2754 	hci_dev_unlock(req->hdev);
2755 	return 0;
2756 }
2757 
2758 static void bg_scan_update(struct work_struct *work)
2759 {
2760 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2761 					    bg_scan_update);
2762 	struct hci_conn *conn;
2763 	u8 status;
2764 	int err;
2765 
2766 	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2767 	if (!err)
2768 		return;
2769 
2770 	hci_dev_lock(hdev);
2771 
2772 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2773 	if (conn)
2774 		hci_le_conn_failed(conn, status);
2775 
2776 	hci_dev_unlock(hdev);
2777 }
2778 
2779 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2780 {
2781 	hci_req_add_le_scan_disable(req, false);
2782 	return 0;
2783 }
2784 
2785 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2786 {
2787 	u8 length = opt;
2788 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2789 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2790 	struct hci_cp_inquiry cp;
2791 
2792 	BT_DBG("%s", req->hdev->name);
2793 
2794 	hci_dev_lock(req->hdev);
2795 	hci_inquiry_cache_flush(req->hdev);
2796 	hci_dev_unlock(req->hdev);
2797 
2798 	memset(&cp, 0, sizeof(cp));
2799 
2800 	if (req->hdev->discovery.limited)
2801 		memcpy(&cp.lap, liac, sizeof(cp.lap));
2802 	else
2803 		memcpy(&cp.lap, giac, sizeof(cp.lap));
2804 
2805 	cp.length = length;
2806 
2807 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2808 
2809 	return 0;
2810 }
2811 
2812 static void le_scan_disable_work(struct work_struct *work)
2813 {
2814 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2815 					    le_scan_disable.work);
2816 	u8 status;
2817 
2818 	BT_DBG("%s", hdev->name);
2819 
2820 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2821 		return;
2822 
2823 	cancel_delayed_work(&hdev->le_scan_restart);
2824 
2825 	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2826 	if (status) {
2827 		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2828 			   status);
2829 		return;
2830 	}
2831 
2832 	hdev->discovery.scan_start = 0;
2833 
2834 	/* If we were running LE only scan, change discovery state. If
2835 	 * we were running both LE and BR/EDR inquiry simultaneously,
2836 	 * and BR/EDR inquiry is already finished, stop discovery,
2837 	 * otherwise BR/EDR inquiry will stop discovery when finished.
2838 	 * If we are going to resolve a remote device name, do not
2839 	 * change the discovery state.
2840 	 */
2841 
2842 	if (hdev->discovery.type == DISCOV_TYPE_LE)
2843 		goto discov_stopped;
2844 
2845 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2846 		return;
2847 
2848 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2849 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2850 		    hdev->discovery.state != DISCOVERY_RESOLVING)
2851 			goto discov_stopped;
2852 
2853 		return;
2854 	}
2855 
2856 	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2857 		     HCI_CMD_TIMEOUT, &status);
2858 	if (status) {
2859 		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2860 		goto discov_stopped;
2861 	}
2862 
2863 	return;
2864 
2865 discov_stopped:
2866 	hci_dev_lock(hdev);
2867 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2868 	hci_dev_unlock(hdev);
2869 }
2870 
2871 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2872 {
2873 	struct hci_dev *hdev = req->hdev;
2874 
2875 	/* If controller is not scanning we are done. */
2876 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2877 		return 0;
2878 
2879 	if (hdev->scanning_paused) {
2880 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2881 		return 0;
2882 	}
2883 
2884 	hci_req_add_le_scan_disable(req, false);
2885 
2886 	if (use_ext_scan(hdev)) {
2887 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2888 
2889 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2890 		ext_enable_cp.enable = LE_SCAN_ENABLE;
2891 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2892 
2893 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2894 			    sizeof(ext_enable_cp), &ext_enable_cp);
2895 	} else {
2896 		struct hci_cp_le_set_scan_enable cp;
2897 
2898 		memset(&cp, 0, sizeof(cp));
2899 		cp.enable = LE_SCAN_ENABLE;
2900 		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2901 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2902 	}
2903 
2904 	return 0;
2905 }
2906 
2907 static void le_scan_restart_work(struct work_struct *work)
2908 {
2909 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2910 					    le_scan_restart.work);
2911 	unsigned long timeout, duration, scan_start, now;
2912 	u8 status;
2913 
2914 	BT_DBG("%s", hdev->name);
2915 
2916 	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2917 	if (status) {
2918 		bt_dev_err(hdev, "failed to restart LE scan: status %d",
2919 			   status);
2920 		return;
2921 	}
2922 
2923 	hci_dev_lock(hdev);
2924 
2925 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2926 	    !hdev->discovery.scan_start)
2927 		goto unlock;
2928 
2929 	/* When the scan was started, hdev->le_scan_disable has been queued
2930 	 * to run after the scan duration from scan_start. During the scan
2931 	 * restart this job has been canceled, and we need to queue it again
2932 	 * with the proper timeout, so the scan does not run indefinitely.
2933 	 */
2934 	duration = hdev->discovery.scan_duration;
2935 	scan_start = hdev->discovery.scan_start;
2936 	now = jiffies;
2937 	if (now - scan_start <= duration) {
2938 		int elapsed;
2939 
2940 		if (now >= scan_start)
2941 			elapsed = now - scan_start;
2942 		else
2943 			elapsed = ULONG_MAX - scan_start + now;
2944 
2945 		timeout = duration - elapsed;
2946 	} else {
2947 		timeout = 0;
2948 	}
2949 
2950 	queue_delayed_work(hdev->req_workqueue,
2951 			   &hdev->le_scan_disable, timeout);
2952 
2953 unlock:
2954 	hci_dev_unlock(hdev);
2955 }
2956 
2957 static int active_scan(struct hci_request *req, unsigned long opt)
2958 {
2959 	uint16_t interval = opt;
2960 	struct hci_dev *hdev = req->hdev;
2961 	u8 own_addr_type;
2962 	/* White list is not used for discovery */
2963 	u8 filter_policy = 0x00;
2964 	/* Discovery doesn't require controller address resolution */
2965 	bool addr_resolv = false;
2966 	int err;
2967 
2968 	BT_DBG("%s", hdev->name);
2969 
2970 	/* If controller is scanning, it means the background scanning is
2971 	 * running. Thus, we should temporarily stop it in order to set the
2972 	 * discovery scanning parameters.
2973 	 */
2974 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2975 		hci_req_add_le_scan_disable(req, false);
2976 
2977 	/* All active scans will be done with either a resolvable private
2978 	 * address (when privacy feature has been enabled) or non-resolvable
2979 	 * private address.
2980 	 */
2981 	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2982 					&own_addr_type);
2983 	if (err < 0)
2984 		own_addr_type = ADDR_LE_DEV_PUBLIC;
2985 
2986 	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2987 			   hdev->le_scan_window_discovery, own_addr_type,
2988 			   filter_policy, addr_resolv);
2989 	return 0;
2990 }
2991 
2992 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2993 {
2994 	int err;
2995 
2996 	BT_DBG("%s", req->hdev->name);
2997 
2998 	err = active_scan(req, opt);
2999 	if (err)
3000 		return err;
3001 
3002 	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3003 }
3004 
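/* Run the inquiry and/or LE scan matching the configured discovery
 * type and, for LE based discovery, queue the delayed work that stops
 * the scan after the discovery timeout.
 */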
3005 static void start_discovery(struct hci_dev *hdev, u8 *status)
3006 {
3007 	unsigned long timeout;
3008 
3009 	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
3010 
3011 	switch (hdev->discovery.type) {
3012 	case DISCOV_TYPE_BREDR:
3013 		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3014 			hci_req_sync(hdev, bredr_inquiry,
3015 				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3016 				     status);
3017 		return;
3018 	case DISCOV_TYPE_INTERLEAVED:
3019 		/* When running simultaneous discovery, the LE scanning time
3020 		 * should occupy the whole discovery time since BR/EDR inquiry
3021 		 * and LE scanning are scheduled by the controller.
3022 		 *
3023 		 * For interleaving discovery in comparison, BR/EDR inquiry
3024 		 * and LE scanning are done sequentially with separate
3025 		 * timeouts.
3026 		 */
3027 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3028 			     &hdev->quirks)) {
3029 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3030 			/* During simultaneous discovery, we double LE scan
3031 			 * interval. We must leave some time for the controller
3032 			 * to do BR/EDR inquiry.
3033 			 */
3034 			hci_req_sync(hdev, interleaved_discov,
3035 				     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3036 				     status);
3037 			break;
3038 		}
3039 
3040 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3041 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3042 			     HCI_CMD_TIMEOUT, status);
3043 		break;
3044 	case DISCOV_TYPE_LE:
3045 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3046 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3047 			     HCI_CMD_TIMEOUT, status);
3048 		break;
3049 	default:
3050 		*status = HCI_ERROR_UNSPECIFIED;
3051 		return;
3052 	}
3053 
3054 	if (*status)
3055 		return;
3056 
3057 	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
3058 
3059 	/* When service discovery is used and the controller has a
3060 	 * strict duplicate filter, it is important to remember the
3061 	 * start and duration of the scan. This is required for
3062 	 * restarting scanning during the discovery phase.
3063 	 */
3064 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3065 		     hdev->discovery.result_filtering) {
3066 		hdev->discovery.scan_start = jiffies;
3067 		hdev->discovery.scan_duration = timeout;
3068 	}
3069 
3070 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3071 			   timeout);
3072 }
3073 
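/* Queue the commands needed to stop the current discovery. Returns
 * true when at least one command was queued, so the caller knows
 * whether a completion event is to be expected.
 */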
3074 bool hci_req_stop_discovery(struct hci_request *req)
3075 {
3076 	struct hci_dev *hdev = req->hdev;
3077 	struct discovery_state *d = &hdev->discovery;
3078 	struct hci_cp_remote_name_req_cancel cp;
3079 	struct inquiry_entry *e;
3080 	bool ret = false;
3081 
3082 	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
3083 
3084 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3085 		if (test_bit(HCI_INQUIRY, &hdev->flags))
3086 			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3087 
3088 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3089 			cancel_delayed_work(&hdev->le_scan_disable);
3090 			hci_req_add_le_scan_disable(req, false);
3091 		}
3092 
3093 		ret = true;
3094 	} else {
3095 		/* Passive scanning */
3096 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3097 			hci_req_add_le_scan_disable(req, false);
3098 			ret = true;
3099 		}
3100 	}
3101 
3102 	/* No further actions needed for LE-only discovery */
3103 	if (d->type == DISCOV_TYPE_LE)
3104 		return ret;
3105 
3106 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3107 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3108 						     NAME_PENDING);
3109 		if (!e)
3110 			return ret;
3111 
3112 		bacpy(&cp.bdaddr, &e->data.bdaddr);
3113 		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3114 			    &cp);
3115 		ret = true;
3116 	}
3117 
3118 	return ret;
3119 }
3120 
3121 static int stop_discovery(struct hci_request *req, unsigned long opt)
3122 {
3123 	hci_dev_lock(req->hdev);
3124 	hci_req_stop_discovery(req);
3125 	hci_dev_unlock(req->hdev);
3126 
3127 	return 0;
3128 }
3129 
3130 static void discov_update(struct work_struct *work)
3131 {
3132 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3133 					    discov_update);
3134 	u8 status = 0;
3135 
3136 	switch (hdev->discovery.state) {
3137 	case DISCOVERY_STARTING:
3138 		start_discovery(hdev, &status);
3139 		mgmt_start_discovery_complete(hdev, status);
3140 		if (status)
3141 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3142 		else
3143 			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3144 		break;
3145 	case DISCOVERY_STOPPING:
3146 		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3147 		mgmt_stop_discovery_complete(hdev, status);
3148 		if (!status)
3149 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3150 		break;
3151 	case DISCOVERY_STOPPED:
3152 	default:
3153 		return;
3154 	}
3155 }
3156 
3157 static void discov_off(struct work_struct *work)
3158 {
3159 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3160 					    discov_off.work);
3161 
3162 	BT_DBG("%s", hdev->name);
3163 
3164 	hci_dev_lock(hdev);
3165 
3166 	/* When discoverable timeout triggers, then just make sure
3167 	 * the limited discoverable flag is cleared. Even in the case
3168 	 * of a timeout triggered from general discoverable, it is
3169 	 * safe to unconditionally clear the flag.
3170 	 */
3171 	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3172 	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3173 	hdev->discov_timeout = 0;
3174 
3175 	hci_dev_unlock(hdev);
3176 
3177 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3178 	mgmt_new_settings(hdev);
3179 }
3180 
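/* Bring the controller in sync with the host state after power on:
 * SSP, LE host support, advertising, link security, scan mode, class
 * of device, name and EIR data.
 */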
3181 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3182 {
3183 	struct hci_dev *hdev = req->hdev;
3184 	u8 link_sec;
3185 
3186 	hci_dev_lock(hdev);
3187 
3188 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3189 	    !lmp_host_ssp_capable(hdev)) {
3190 		u8 mode = 0x01;
3191 
3192 		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3193 
3194 		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3195 			u8 support = 0x01;
3196 
3197 			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3198 				    sizeof(support), &support);
3199 		}
3200 	}
3201 
3202 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3203 	    lmp_bredr_capable(hdev)) {
3204 		struct hci_cp_write_le_host_supported cp;
3205 
3206 		cp.le = 0x01;
3207 		cp.simul = 0x00;
3208 
3209 		/* Check first if we already have the right
3210 		 * host state (host features set)
3211 		 */
3212 		if (cp.le != lmp_host_le_capable(hdev) ||
3213 		    cp.simul != lmp_host_le_br_capable(hdev))
3214 			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3215 				    sizeof(cp), &cp);
3216 	}
3217 
3218 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3219 		/* Make sure the controller has a good default for
3220 		 * advertising data. This also applies to the case
3221 		 * where BR/EDR was toggled during the AUTO_OFF phase.
3222 		 */
3223 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3224 		    list_empty(&hdev->adv_instances)) {
3225 			int err;
3226 
3227 			if (ext_adv_capable(hdev)) {
3228 				err = __hci_req_setup_ext_adv_instance(req,
3229 								       0x00);
3230 				if (!err)
3231 					__hci_req_update_scan_rsp_data(req,
3232 								       0x00);
3233 			} else {
3234 				err = 0;
3235 				__hci_req_update_adv_data(req, 0x00);
3236 				__hci_req_update_scan_rsp_data(req, 0x00);
3237 			}
3238 
3239 			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3240 				if (!ext_adv_capable(hdev))
3241 					__hci_req_enable_advertising(req);
3242 				else if (!err)
3243 					__hci_req_enable_ext_advertising(req,
3244 									 0x00);
3245 			}
3246 		} else if (!list_empty(&hdev->adv_instances)) {
3247 			struct adv_info *adv_instance;
3248 
3249 			adv_instance = list_first_entry(&hdev->adv_instances,
3250 							struct adv_info, list);
3251 			__hci_req_schedule_adv_instance(req,
3252 							adv_instance->instance,
3253 							true);
3254 		}
3255 	}
3256 
3257 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3258 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3259 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3260 			    sizeof(link_sec), &link_sec);
3261 
3262 	if (lmp_bredr_capable(hdev)) {
3263 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3264 			__hci_req_write_fast_connectable(req, true);
3265 		else
3266 			__hci_req_write_fast_connectable(req, false);
3267 		__hci_req_update_scan(req);
3268 		__hci_req_update_class(req);
3269 		__hci_req_update_name(req);
3270 		__hci_req_update_eir(req);
3271 	}
3272 
3273 	hci_dev_unlock(hdev);
3274 	return 0;
3275 }
3276 
3277 int __hci_req_hci_power_on(struct hci_dev *hdev)
3278 {
3279 	/* Register the available SMP channels (BR/EDR and LE) only when
3280 	 * successfully powering on the controller. This late
3281 	 * registration is required so that LE SMP can clearly decide if
3282 	 * the public address or static address is used.
3283 	 */
3284 	smp_register(hdev);
3285 
3286 	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3287 			      NULL);
3288 }
3289 
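/* Wire up the work items used throughout this file. These are the
 * counterparts of the cancellations done in hci_request_cancel_all()
 * below.
 */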
3290 void hci_request_setup(struct hci_dev *hdev)
3291 {
3292 	INIT_WORK(&hdev->discov_update, discov_update);
3293 	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3294 	INIT_WORK(&hdev->scan_update, scan_update_work);
3295 	INIT_WORK(&hdev->connectable_update, connectable_update_work);
3296 	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3297 	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3298 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3299 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3300 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3301 }
3302 
3303 void hci_request_cancel_all(struct hci_dev *hdev)
3304 {
3305 	hci_req_sync_cancel(hdev, ENODEV);
3306 
3307 	cancel_work_sync(&hdev->discov_update);
3308 	cancel_work_sync(&hdev->bg_scan_update);
3309 	cancel_work_sync(&hdev->scan_update);
3310 	cancel_work_sync(&hdev->connectable_update);
3311 	cancel_work_sync(&hdev->discoverable_update);
3312 	cancel_delayed_work_sync(&hdev->discov_off);
3313 	cancel_delayed_work_sync(&hdev->le_scan_disable);
3314 	cancel_delayed_work_sync(&hdev->le_scan_restart);
3315 
3316 	if (hdev->adv_instance_timeout) {
3317 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
3318 		hdev->adv_instance_timeout = 0;
3319 	}
3320 }
3321