Lines Matching +full:monitor +full:-interval +full:-ms (free-text search hits in net/bluetooth/hci_request.c)

2    BlueZ - Bluetooth protocol stack for Linux
40 skb_queue_head_init(&req->cmd_q); in hci_req_init()
41 req->hdev = hdev; in hci_req_init()
42 req->err = 0; in hci_req_init()
47 skb_queue_purge(&req->cmd_q); in hci_req_purge()
52 return hdev->req_status == HCI_REQ_PEND; in hci_req_status_pend()
58 struct hci_dev *hdev = req->hdev; in req_run()
62 bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); in req_run()
67 if (req->err) { in req_run()
68 skb_queue_purge(&req->cmd_q); in req_run()
69 return req->err; in req_run()
73 if (skb_queue_empty(&req->cmd_q)) in req_run()
74 return -ENODATA; in req_run()
76 skb = skb_peek_tail(&req->cmd_q); in req_run()
78 bt_cb(skb)->hci.req_complete = complete; in req_run()
80 bt_cb(skb)->hci.req_complete_skb = complete_skb; in req_run()
81 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; in req_run()
84 spin_lock_irqsave(&hdev->cmd_q.lock, flags); in req_run()
85 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); in req_run()
86 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); in req_run()
88 queue_work(hdev->workqueue, &hdev->cmd_work); in req_run()
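
Taken together, the matches above trace the whole request path: hci_req_init() prepares a private cmd_q, commands are queued onto it, and req_run() splices that queue onto hdev->cmd_q and schedules cmd_work. A minimal caller sketch, assuming only the kernel-internal API visible in these hits (the completion callback name is hypothetical):

	struct hci_request req;
	int err;

	hci_req_init(&req, hdev);
	/* Queue a command; allocation failures accumulate in req.err and
	 * make the whole request fail when it is run.
	 */
	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
	/* Splice req.cmd_q onto hdev->cmd_q and kick cmd_work; returns
	 * -ENODATA if nothing was queued.
	 */
	err = hci_req_run(&req, my_complete_cb); /* hypothetical hci_req_complete_t */
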
108 if (hdev->req_status == HCI_REQ_PEND) { in hci_req_sync_complete()
109 hdev->req_result = result; in hci_req_sync_complete()
110 hdev->req_status = HCI_REQ_DONE; in hci_req_sync_complete()
112 hdev->req_skb = skb_get(skb); in hci_req_sync_complete()
113 wake_up_interruptible(&hdev->req_wait_q); in hci_req_sync_complete()
121 if (hdev->req_status == HCI_REQ_PEND) { in hci_req_sync_cancel()
122 hdev->req_result = err; in hci_req_sync_cancel()
123 hdev->req_status = HCI_REQ_CANCELED; in hci_req_sync_cancel()
124 wake_up_interruptible(&hdev->req_wait_q); in hci_req_sync_cancel()
141 hdev->req_status = HCI_REQ_PEND; in __hci_cmd_sync_ev()
147 err = wait_event_interruptible_timeout(hdev->req_wait_q, in __hci_cmd_sync_ev()
148 hdev->req_status != HCI_REQ_PEND, timeout); in __hci_cmd_sync_ev()
150 if (err == -ERESTARTSYS) in __hci_cmd_sync_ev()
151 return ERR_PTR(-EINTR); in __hci_cmd_sync_ev()
153 switch (hdev->req_status) { in __hci_cmd_sync_ev()
155 err = -bt_to_errno(hdev->req_result); in __hci_cmd_sync_ev()
159 err = -hdev->req_result; in __hci_cmd_sync_ev()
163 err = -ETIMEDOUT; in __hci_cmd_sync_ev()
167 hdev->req_status = hdev->req_result = 0; in __hci_cmd_sync_ev()
168 skb = hdev->req_skb; in __hci_cmd_sync_ev()
169 hdev->req_skb = NULL; in __hci_cmd_sync_ev()
179 return ERR_PTR(-ENODATA); in __hci_cmd_sync_ev()
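
These hits show both sides of the synchronous handshake: the issuer sets req_status to HCI_REQ_PEND and sleeps on req_wait_q; hci_req_sync_complete() (or a cancel) flips the status and wakes it, and the result is translated back into an errno or ERR_PTR. A hedged caller sketch (the double-underscore prefix conventionally means hdev->req_lock is already held; event 0 means "wait for the normal Command Complete"):

	struct sk_buff *skb;

	skb = __hci_cmd_sync_ev(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
				0, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* -EINTR, -ETIMEDOUT, ... */
	/* ... parse response parameters from skb->data ... */
	kfree_skb(skb);
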
204 hdev->req_status = HCI_REQ_PEND; in __hci_req_sync()
215 hdev->req_status = 0; in __hci_req_sync()
222 if (err == -ENODATA) { in __hci_req_sync()
234 err = wait_event_interruptible_timeout(hdev->req_wait_q, in __hci_req_sync()
235 hdev->req_status != HCI_REQ_PEND, timeout); in __hci_req_sync()
237 if (err == -ERESTARTSYS) in __hci_req_sync()
238 return -EINTR; in __hci_req_sync()
240 switch (hdev->req_status) { in __hci_req_sync()
242 err = -bt_to_errno(hdev->req_result); in __hci_req_sync()
244 *hci_status = hdev->req_result; in __hci_req_sync()
248 err = -hdev->req_result; in __hci_req_sync()
254 err = -ETIMEDOUT; in __hci_req_sync()
260 kfree_skb(hdev->req_skb); in __hci_req_sync()
261 hdev->req_skb = NULL; in __hci_req_sync()
262 hdev->req_status = hdev->req_result = 0; in __hci_req_sync()
281 if (test_bit(HCI_UP, &hdev->flags)) in hci_req_sync()
284 ret = -ENETDOWN; in hci_req_sync()
302 hdr->opcode = cpu_to_le16(opcode); in hci_prepare_cmd()
303 hdr->plen = plen; in hci_prepare_cmd()
308 bt_dev_dbg(hdev, "skb len %d", skb->len); in hci_prepare_cmd()
320 struct hci_dev *hdev = req->hdev; in hci_req_add_ev()
328 if (req->err) in hci_req_add_ev()
335 req->err = -ENOMEM; in hci_req_add_ev()
339 if (skb_queue_empty(&req->cmd_q)) in hci_req_add_ev()
340 bt_cb(skb)->hci.req_flags |= HCI_REQ_START; in hci_req_add_ev()
342 bt_cb(skb)->hci.req_event = event; in hci_req_add_ev()
344 skb_queue_tail(&req->cmd_q, skb); in hci_req_add_ev()
355 struct hci_dev *hdev = req->hdev; in __hci_req_write_fast_connectable()
362 if (hdev->hci_ver < BLUETOOTH_VER_1_2) in __hci_req_write_fast_connectable()
368 /* 160 msec page scan interval */ in __hci_req_write_fast_connectable()
369 acp.interval = cpu_to_le16(0x0100); in __hci_req_write_fast_connectable()
371 type = hdev->def_page_scan_type; in __hci_req_write_fast_connectable()
372 acp.interval = cpu_to_le16(hdev->def_page_scan_int); in __hci_req_write_fast_connectable()
375 acp.window = cpu_to_le16(hdev->def_page_scan_window); in __hci_req_write_fast_connectable()
377 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval || in __hci_req_write_fast_connectable()
378 __cpu_to_le16(hdev->page_scan_window) != acp.window) in __hci_req_write_fast_connectable()
382 if (hdev->page_scan_type != type) in __hci_req_write_fast_connectable()
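
The "160 msec" comment checks out: BR/EDR page scan interval and window are expressed in 0.625 ms baseband slots, so

	0x0100 = 256 slots * 0.625 ms = 160 ms

and the comparisons just above skip the HCI writes when the interval, window, and scan type already match what the controller has.
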
388 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; in start_interleave_scan()
389 queue_delayed_work(hdev->req_workqueue, in start_interleave_scan()
390 &hdev->interleave_scan, 0); in start_interleave_scan()
395 return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; in is_interleave_scanning()
402 cancel_delayed_work_sync(&hdev->interleave_scan); in cancel_interleave_scan()
404 hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; in cancel_interleave_scan()
413 * - There is at least one ADV monitor in __hci_update_interleaved_scan()
414 * - At least one pending LE connection or one device to be scanned for in __hci_update_interleaved_scan()
415 * - Monitor offloading is not supported in __hci_update_interleaved_scan()
420 !(list_empty(&hdev->pend_le_conns) && in __hci_update_interleaved_scan()
421 list_empty(&hdev->pend_le_reports)) && in __hci_update_interleaved_scan()
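
The comment's three bullets combine into a single predicate deciding whether interleaved scanning should run. A hedged reconstruction (only fragments of the expression appear in these hits; offload_supported stands in for the actual monitor-offload check):

	bool use_interleaving =
		!idr_is_empty(&hdev->adv_monitors_idr) &&	/* >= 1 ADV monitor */
		!(list_empty(&hdev->pend_le_conns) &&
		  list_empty(&hdev->pend_le_reports)) &&	/* something to scan for */
		!offload_supported;				/* hypothetical flag */
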
438 /* This function controls the background scanning based on hdev->pend_le_conns
442 * This function requires the caller holds hdev->lock.
446 struct hci_dev *hdev = req->hdev; in __hci_update_background_scan()
448 if (!test_bit(HCI_UP, &hdev->flags) || in __hci_update_background_scan()
449 test_bit(HCI_INIT, &hdev->flags) || in __hci_update_background_scan()
461 if (hdev->discovery.state != DISCOVERY_STOPPED) in __hci_update_background_scan()
476 if (list_empty(&hdev->pend_le_conns) && in __hci_update_background_scan()
477 list_empty(&hdev->pend_le_reports) && in __hci_update_background_scan()
516 struct hci_dev *hdev = req->hdev; in __hci_req_update_name()
519 memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); in __hci_req_update_name()
534 list_for_each_entry(uuid, &hdev->uuids, list) { in create_uuid16_list()
537 if (uuid->size != 16) in create_uuid16_list()
540 uuid16 = get_unaligned_le16(&uuid->uuid[12]); in create_uuid16_list()
555 if ((ptr - data) + sizeof(u16) > len) { in create_uuid16_list()
576 list_for_each_entry(uuid, &hdev->uuids, list) { in create_uuid32_list()
577 if (uuid->size != 32) in create_uuid32_list()
588 if ((ptr - data) + sizeof(u32) > len) { in create_uuid32_list()
593 memcpy(ptr, &uuid->uuid[12], sizeof(u32)); in create_uuid32_list()
609 list_for_each_entry(uuid, &hdev->uuids, list) { in create_uuid128_list()
610 if (uuid->size != 128) in create_uuid128_list()
621 if ((ptr - data) + 16 > len) { in create_uuid128_list()
626 memcpy(ptr, uuid->uuid, 16); in create_uuid128_list()
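
All three UUID builders emit the same length-prefixed EIR field, differing only in element width (2, 4, or 16 bytes); the (ptr - data) checks stop early when the next element would overflow the caller's buffer (per the mainline source, the type byte is then downgraded to the corresponding _SOME variant). The wire shape each one writes:

	ptr[0] = 1 + data_len;		/* length byte covers the type byte */
	ptr[1] = EIR_UUID16_ALL;	/* or EIR_UUID32_ALL / EIR_UUID128_ALL */
	/* ptr[2..] = little-endian UUID elements */
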
639 name_len = strlen(hdev->dev_name); in create_eir()
652 memcpy(ptr + 2, hdev->dev_name, name_len); in create_eir()
657 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) { in create_eir()
660 ptr[2] = (u8) hdev->inq_tx_power; in create_eir()
665 if (hdev->devid_source > 0) { in create_eir()
669 put_unaligned_le16(hdev->devid_source, ptr + 2); in create_eir()
670 put_unaligned_le16(hdev->devid_vendor, ptr + 4); in create_eir()
671 put_unaligned_le16(hdev->devid_product, ptr + 6); in create_eir()
672 put_unaligned_le16(hdev->devid_version, ptr + 8); in create_eir()
677 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); in create_eir()
678 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); in create_eir()
679 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); in create_eir()
684 struct hci_dev *hdev = req->hdev; in __hci_req_update_eir()
703 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) in __hci_req_update_eir()
706 memcpy(hdev->eir, cp.data, sizeof(cp.data)); in __hci_req_update_eir()
713 struct hci_dev *hdev = req->hdev; in hci_req_add_le_scan_disable()
715 if (hdev->scanning_paused) { in hci_req_add_le_scan_disable()
720 if (hdev->suspended) in hci_req_add_le_scan_disable()
721 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); in hci_req_add_le_scan_disable()
756 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr, in del_from_accept_list()
760 if (use_ll_privacy(req->hdev) && in del_from_accept_list()
761 hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) { in del_from_accept_list()
764 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type); in del_from_accept_list()
777 /* Adds connection to accept list if needed. On error, returns -1. */
783 struct hci_dev *hdev = req->hdev; in add_to_accept_list()
786 if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr, in add_to_accept_list()
787 params->addr_type)) in add_to_accept_list()
791 if (*num_entries >= hdev->le_accept_list_size) in add_to_accept_list()
792 return -1; in add_to_accept_list()
797 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) { in add_to_accept_list()
798 return -1; in add_to_accept_list()
802 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, in add_to_accept_list()
803 params->current_flags)) in add_to_accept_list()
807 cp.bdaddr_type = params->addr_type; in add_to_accept_list()
808 bacpy(&cp.bdaddr, &params->addr); in add_to_accept_list()
818 irk = hci_find_irk_by_addr(hdev, &params->addr, in add_to_accept_list()
819 params->addr_type); in add_to_accept_list()
823 cp.bdaddr_type = params->addr_type; in add_to_accept_list()
824 bacpy(&cp.bdaddr, &params->addr); in add_to_accept_list()
825 memcpy(cp.peer_irk, irk->val, 16); in add_to_accept_list()
828 memcpy(cp.local_irk, hdev->irk, 16); in add_to_accept_list()
842 struct hci_dev *hdev = req->hdev; in update_accept_list()
852 bool allow_rpa = hdev->suspended; in update_accept_list()
864 list_for_each_entry(b, &hdev->le_accept_list, list) { in update_accept_list()
865 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, in update_accept_list()
866 &b->bdaddr, in update_accept_list()
867 b->bdaddr_type); in update_accept_list()
868 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports, in update_accept_list()
869 &b->bdaddr, in update_accept_list()
870 b->bdaddr_type); in update_accept_list()
876 del_from_accept_list(req, &b->bdaddr, b->bdaddr_type); in update_accept_list()
883 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { in update_accept_list()
900 list_for_each_entry(params, &hdev->pend_le_conns, action) { in update_accept_list()
909 list_for_each_entry(params, &hdev->pend_le_reports, action) { in update_accept_list()
915 * - We are not currently suspending in update_accept_list()
916 * - There are 1 or more ADV monitors registered and it's not offloaded in update_accept_list()
917 * - Interleaved scanning is not currently using the allowlist in update_accept_list()
919 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended && in update_accept_list()
921 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) in update_accept_list()
933 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, in hci_req_start_scan() argument
937 struct hci_dev *hdev = req->hdev; in hci_req_start_scan()
939 if (hdev->scanning_paused) { in hci_req_start_scan()
963 phy_params = (void *)ext_param_cp->data; in hci_req_start_scan()
966 ext_param_cp->own_addr_type = own_addr_type; in hci_req_start_scan()
967 ext_param_cp->filter_policy = filter_policy; in hci_req_start_scan()
972 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M; in hci_req_start_scan()
975 phy_params->type = type; in hci_req_start_scan()
976 phy_params->interval = cpu_to_le16(interval); in hci_req_start_scan()
977 phy_params->window = cpu_to_le16(window); in hci_req_start_scan()
984 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED; in hci_req_start_scan()
987 phy_params->type = type; in hci_req_start_scan()
988 phy_params->interval = cpu_to_le16(interval); in hci_req_start_scan()
989 phy_params->window = cpu_to_le16(window); in hci_req_start_scan()
1010 param_cp.interval = cpu_to_le16(interval); in hci_req_start_scan()
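
The two code paths reflect the two scan commands: the extended variant is variable length, a fixed header followed by one per-PHY block for each bit set in scanning_phys, which is why phy_params is advanced between the 1M and Coded branches; the legacy variant at the bottom is a single fixed struct. A sketch of the extended command's length (struct names as in include/net/bluetooth/hci.h):

	plen  = sizeof(struct hci_cp_le_set_ext_scan_params);
	plen += num_phys * sizeof(struct hci_cp_le_scan_phy_params);
	/* num_phys = bits set among LE_SCAN_PHY_1M / LE_SCAN_PHY_CODED */
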
1028 struct hci_conn_hash *h = &hdev->conn_hash; in hci_is_le_conn_scanning()
1033 list_for_each_entry_rcu(c, &h->list, list) { in hci_is_le_conn_scanning()
1034 if (c->type == LE_LINK && c->state == BT_CONNECT && in hci_is_le_conn_scanning()
1035 test_bit(HCI_CONN_SCANNING, &c->flags)) { in hci_is_le_conn_scanning()
1052 struct hci_dev *hdev = req->hdev; in hci_req_add_le_passive_scan()
1055 u16 window, interval; in hci_req_add_le_passive_scan() local
1061 if (hdev->scanning_paused) { in hci_req_add_le_passive_scan()
1067 * during passive scanning. Not using a non-resolvable address in hci_req_add_le_passive_scan()
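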
1076 if (hdev->enable_advmon_interleave_scan && in hci_req_add_le_passive_scan()
1080 bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); in hci_req_add_le_passive_scan()
1097 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) in hci_req_add_le_passive_scan()
1100 if (hdev->suspended) { in hci_req_add_le_passive_scan()
1101 window = hdev->le_scan_window_suspend; in hci_req_add_le_passive_scan()
1102 interval = hdev->le_scan_int_suspend; in hci_req_add_le_passive_scan()
1104 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); in hci_req_add_le_passive_scan()
1106 window = hdev->le_scan_window_connect; in hci_req_add_le_passive_scan()
1107 interval = hdev->le_scan_int_connect; in hci_req_add_le_passive_scan()
1109 window = hdev->le_scan_window_adv_monitor; in hci_req_add_le_passive_scan()
1110 interval = hdev->le_scan_int_adv_monitor; in hci_req_add_le_passive_scan()
1113 * monitor for the following reasons. in hci_req_add_le_passive_scan()
1126 window = hdev->le_scan_window; in hci_req_add_le_passive_scan()
1127 interval = hdev->le_scan_interval; in hci_req_add_le_passive_scan()
1132 hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window, in hci_req_add_le_passive_scan()
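
The window/interval pair handed to hci_req_start_scan() above is chosen by a fallback chain that these hits only show in pieces; summarized:

	suspended                -> le_scan_window_suspend     / le_scan_int_suspend
	LE connect scanning      -> le_scan_window_connect     / le_scan_int_connect
	ADV monitors registered  -> le_scan_window_adv_monitor / le_scan_int_adv_monitor
	default                  -> le_scan_window             / le_scan_interval
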
1149 if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE || in adv_instance_is_scannable()
1150 adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME) in adv_instance_is_scannable()
1153 return adv_instance->scan_rsp_len ? true : false; in adv_instance_is_scannable()
1160 if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED)) in hci_req_clear_event_filter()
1163 if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) { in hci_req_clear_event_filter()
1174 struct hci_dev *hdev = req->hdev; in hci_req_set_event_filter()
1176 bool scanning = test_bit(HCI_PSCAN, &hdev->flags); in hci_req_set_event_filter()
1184 list_for_each_entry(b, &hdev->accept_list, list) { in hci_req_set_event_filter()
1186 b->current_flags)) in hci_req_set_event_filter()
1190 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr); in hci_req_set_event_filter()
1195 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); in hci_req_set_event_filter()
1201 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); in hci_req_set_event_filter()
1204 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); in hci_req_set_event_filter()
1211 if (hdev->adv_instance_timeout) { in cancel_adv_timeout()
1212 hdev->adv_instance_timeout = 0; in cancel_adv_timeout()
1213 cancel_delayed_work(&hdev->adv_instance_expire); in cancel_adv_timeout()
1217 /* This function requires the caller holds hdev->lock */
1220 bt_dev_dbg(req->hdev, "Pausing advertising instances"); in __hci_req_pause_adv_instances()
1228 if (!ext_adv_capable(req->hdev)) in __hci_req_pause_adv_instances()
1229 cancel_adv_timeout(req->hdev); in __hci_req_pause_adv_instances()
1232 /* This function requires the caller holds hdev->lock */
1237 bt_dev_dbg(req->hdev, "Resuming advertising instances"); in __hci_req_resume_adv_instances()
1239 if (ext_adv_capable(req->hdev)) { in __hci_req_resume_adv_instances()
1240 /* Call for each tracked instance to be re-enabled */ in __hci_req_resume_adv_instances()
1241 list_for_each_entry(adv, &req->hdev->adv_instances, list) { in __hci_req_resume_adv_instances()
1243 adv->instance); in __hci_req_resume_adv_instances()
1251 req->hdev->cur_adv_instance, in __hci_req_resume_adv_instances()
1256 /* This function requires the caller holds hdev->lock */
1271 if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) || in suspend_req_complete()
1272 test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) { in suspend_req_complete()
1273 clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); in suspend_req_complete()
1274 clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); in suspend_req_complete()
1275 wake_up(&hdev->suspend_wait_q); in suspend_req_complete()
1278 if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) { in suspend_req_complete()
1279 clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks); in suspend_req_complete()
1280 wake_up(&hdev->suspend_wait_q); in suspend_req_complete()
1287 struct hci_dev *hdev = req->hdev; in hci_req_add_set_adv_filter_enable()
1298 if (hdev->suspended && !enable) in hci_req_add_set_adv_filter_enable()
1299 set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks); in hci_req_add_set_adv_filter_enable()
1311 if (next == hdev->suspend_state) { in hci_req_prepare_suspend()
1316 hdev->suspend_state = next; in hci_req_prepare_suspend()
1321 hdev->suspended = true; in hci_req_prepare_suspend()
1324 old_state = hdev->discovery.state; in hci_req_prepare_suspend()
1326 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks); in hci_req_prepare_suspend()
1328 queue_work(hdev->req_workqueue, &hdev->discov_update); in hci_req_prepare_suspend()
1331 hdev->discovery_paused = true; in hci_req_prepare_suspend()
1332 hdev->discovery_old_state = old_state; in hci_req_prepare_suspend()
1337 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks); in hci_req_prepare_suspend()
1338 cancel_delayed_work(&hdev->discov_off); in hci_req_prepare_suspend()
1339 queue_delayed_work(hdev->req_workqueue, in hci_req_prepare_suspend()
1340 &hdev->discov_off, 0); in hci_req_prepare_suspend()
1344 if (hdev->adv_instance_cnt) in hci_req_prepare_suspend()
1347 hdev->advertising_paused = true; in hci_req_prepare_suspend()
1348 hdev->advertising_old_state = old_state; in hci_req_prepare_suspend()
1351 if (test_bit(HCI_PSCAN, &hdev->flags)) { in hci_req_prepare_suspend()
1355 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); in hci_req_prepare_suspend()
1367 /* Prevent disconnects from causing scanning to be re-enabled */ in hci_req_prepare_suspend()
1368 hdev->scanning_paused = true; in hci_req_prepare_suspend()
1375 list_for_each_entry(conn, &hdev->conn_hash.list, list) { in hci_req_prepare_suspend()
1384 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks); in hci_req_prepare_suspend()
1388 hdev->scanning_paused = false; in hci_req_prepare_suspend()
1394 hdev->scanning_paused = true; in hci_req_prepare_suspend()
1397 hdev->suspended = false; in hci_req_prepare_suspend()
1398 hdev->scanning_paused = false; in hci_req_prepare_suspend()
1410 hdev->advertising_paused = false; in hci_req_prepare_suspend()
1411 if (hdev->advertising_old_state) { in hci_req_prepare_suspend()
1413 hdev->suspend_tasks); in hci_req_prepare_suspend()
1415 queue_work(hdev->req_workqueue, in hci_req_prepare_suspend()
1416 &hdev->discoverable_update); in hci_req_prepare_suspend()
1417 hdev->advertising_old_state = 0; in hci_req_prepare_suspend()
1421 if (hdev->adv_instance_cnt) in hci_req_prepare_suspend()
1425 hdev->discovery_paused = false; in hci_req_prepare_suspend()
1426 if (hdev->discovery_old_state != DISCOVERY_STOPPED && in hci_req_prepare_suspend()
1427 hdev->discovery_old_state != DISCOVERY_STOPPING) { in hci_req_prepare_suspend()
1428 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks); in hci_req_prepare_suspend()
1430 queue_work(hdev->req_workqueue, &hdev->discov_update); in hci_req_prepare_suspend()
1436 hdev->suspend_state = next; in hci_req_prepare_suspend()
1439 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks); in hci_req_prepare_suspend()
1440 wake_up(&hdev->suspend_wait_q); in hci_req_prepare_suspend()
1445 return adv_instance_is_scannable(hdev, hdev->cur_adv_instance); in adv_cur_instance_is_scannable()
1450 if (ext_adv_capable(req->hdev)) { in __hci_req_disable_advertising()
1491 return adv_instance->flags; in get_adv_instance_flags()
1524 if (hdev->conn_hash.le_num_peripheral > 0) { in is_advertising_allowed()
1527 if (!connectable && !(hdev->le_states[2] & 0x10)) in is_advertising_allowed()
1533 if (connectable && (!(hdev->le_states[4] & 0x40) || in is_advertising_allowed()
1534 !(hdev->le_states[2] & 0x20))) in is_advertising_allowed()
1539 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) { in is_advertising_allowed()
1541 if (!connectable && !(hdev->le_states[2] & 0x02)) in is_advertising_allowed()
1547 if (connectable && (!(hdev->le_states[4] & 0x08) || in is_advertising_allowed()
1548 !(hdev->le_states[2] & 0x08))) in is_advertising_allowed()
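
The le_states tests in is_advertising_allowed() index the controller's supported-LE-states bitmask byte by byte; the spec bit being tested is simply

	spec_bit = byte_index * 8 + bit_index

so le_states[2] & 0x10 tests bit 20, le_states[2] & 0x20 bit 21, le_states[4] & 0x08 bit 35, and le_states[4] & 0x40 bit 38.
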
1557 struct hci_dev *hdev = req->hdev; in __hci_req_enable_advertising()
1565 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance); in __hci_req_enable_advertising()
1566 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); in __hci_req_enable_advertising()
1587 /* Set require_privacy to true only when non-connectable in __hci_req_enable_advertising()
1589 * non-resolvable private address. in __hci_req_enable_advertising()
1599 adv_min_interval = adv_instance->min_interval; in __hci_req_enable_advertising()
1600 adv_max_interval = adv_instance->max_interval; in __hci_req_enable_advertising()
1602 adv_min_interval = hdev->le_adv_min_interval; in __hci_req_enable_advertising()
1603 adv_max_interval = hdev->le_adv_max_interval; in __hci_req_enable_advertising()
1624 cp.channel_map = hdev->le_adv_channel_map; in __hci_req_enable_advertising()
1637 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3) in append_local_name()
1641 complete_len = strlen(hdev->dev_name); in append_local_name()
1644 hdev->dev_name, complete_len + 1); in append_local_name()
1647 short_len = strlen(hdev->short_name); in append_local_name()
1650 hdev->short_name, short_len + 1); in append_local_name()
1658 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH); in append_local_name()
1670 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance); in append_appearance()
1677 if (hdev->appearance) in create_default_scan_rsp_data()
1694 instance_flags = adv_instance->flags; in create_instance_scan_rsp_data()
1696 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) in create_instance_scan_rsp_data()
1699 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data, in create_instance_scan_rsp_data()
1700 adv_instance->scan_rsp_len); in create_instance_scan_rsp_data()
1702 scan_rsp_len += adv_instance->scan_rsp_len; in create_instance_scan_rsp_data()
1712 struct hci_dev *hdev = req->hdev; in __hci_req_update_scan_rsp_data()
1732 if (hdev->scan_rsp_data_len == len && in __hci_req_update_scan_rsp_data()
1733 !memcmp(pdu.data, hdev->scan_rsp_data, len)) in __hci_req_update_scan_rsp_data()
1736 memcpy(hdev->scan_rsp_data, pdu.data, len); in __hci_req_update_scan_rsp_data()
1737 hdev->scan_rsp_data_len = len; in __hci_req_update_scan_rsp_data()
1757 if (hdev->scan_rsp_data_len == len && in __hci_req_update_scan_rsp_data()
1758 !memcmp(cp.data, hdev->scan_rsp_data, len)) in __hci_req_update_scan_rsp_data()
1761 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); in __hci_req_update_scan_rsp_data()
1762 hdev->scan_rsp_data_len = len; in __hci_req_update_scan_rsp_data()
1788 if (adv_instance && eir_get_data(adv_instance->adv_data, in create_instance_adv_data()
1789 adv_instance->adv_data_len, EIR_FLAGS, in create_instance_adv_data()
1827 memcpy(ptr, adv_instance->adv_data, in create_instance_adv_data()
1828 adv_instance->adv_data_len); in create_instance_adv_data()
1829 ad_len += adv_instance->adv_data_len; in create_instance_adv_data()
1830 ptr += adv_instance->adv_data_len; in create_instance_adv_data()
1838 adv_tx_power = adv_instance->tx_power; in create_instance_adv_data()
1840 adv_tx_power = hdev->adv_tx_power; in create_instance_adv_data()
1842 adv_tx_power = hdev->adv_tx_power; in create_instance_adv_data()
1861 struct hci_dev *hdev = req->hdev; in __hci_req_update_adv_data()
1878 if (hdev->adv_data_len == len && in __hci_req_update_adv_data()
1879 memcmp(pdu.data, hdev->adv_data, len) == 0) in __hci_req_update_adv_data()
1882 memcpy(hdev->adv_data, pdu.data, len); in __hci_req_update_adv_data()
1883 hdev->adv_data_len = len; in __hci_req_update_adv_data()
1900 if (hdev->adv_data_len == len && in __hci_req_update_adv_data()
1901 memcmp(cp.data, hdev->adv_data, len) == 0) in __hci_req_update_adv_data()
1904 memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); in __hci_req_update_adv_data()
1905 hdev->adv_data_len = len; in __hci_req_update_adv_data()
1926 BT_DBG("%s status %u", hdev->name, status); in enable_addr_resolution_complete()
1955 list_empty(&hdev->adv_instances)) in hci_req_reenable_advertising()
1960 if (hdev->cur_adv_instance) { in hci_req_reenable_advertising()
1961 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance, in hci_req_reenable_advertising()
1988 hdev->adv_instance_timeout = 0; in adv_timeout_expire()
1990 instance = hdev->cur_adv_instance; in adv_timeout_expire()
1998 if (list_empty(&hdev->adv_instances)) in adv_timeout_expire()
2010 struct hci_dev *hdev = req->hdev; in hci_req_add_le_interleaved_scan()
2019 switch (hdev->interleave_scan_state) { in hci_req_add_le_interleaved_scan()
2022 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; in hci_req_add_le_interleaved_scan()
2026 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; in hci_req_add_le_interleaved_scan()
2030 ret = -1; in hci_req_add_le_interleaved_scan()
2045 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { in interleave_scan_work()
2046 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); in interleave_scan_work()
2047 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { in interleave_scan_work()
2048 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); in interleave_scan_work()
2059 queue_delayed_work(hdev->req_workqueue, in interleave_scan_work()
2060 &hdev->interleave_scan, timeout); in interleave_scan_work()
2092 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); in hci_get_random_address()
2098 bacpy(rand_addr, &hdev->rpa); in hci_get_random_address()
2104 * use a non-resolvable private address. This is useful for in hci_get_random_address()
2105 * non-connectable advertising. in hci_get_random_address()
2111 /* The non-resolvable private address is generated in hci_get_random_address()
2118 /* The non-resolvable private address shall not be in hci_get_random_address()
2121 if (bacmp(&hdev->bdaddr, &nrpa)) in hci_get_random_address()
2144 struct hci_dev *hdev = req->hdev; in set_random_addr()
2169 struct hci_dev *hdev = req->hdev; in __hci_req_setup_ext_adv_instance()
2181 return -EINVAL; in __hci_req_setup_ext_adv_instance()
2195 return -EPERM; in __hci_req_setup_ext_adv_instance()
2197 /* Set require_privacy to true only when non-connectable in __hci_req_setup_ext_adv_instance()
2199 * non-resolvable private address. in __hci_req_setup_ext_adv_instance()
2210 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval); in __hci_req_setup_ext_adv_instance()
2211 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval); in __hci_req_setup_ext_adv_instance()
2212 cp.tx_power = adv_instance->tx_power; in __hci_req_setup_ext_adv_instance()
2214 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); in __hci_req_setup_ext_adv_instance()
2215 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); in __hci_req_setup_ext_adv_instance()
2240 cp.channel_map = hdev->le_adv_channel_map; in __hci_req_setup_ext_adv_instance()
2263 if (!bacmp(&random_addr, &adv_instance->random_addr)) in __hci_req_setup_ext_adv_instance()
2266 if (!bacmp(&random_addr, &hdev->random_addr)) in __hci_req_setup_ext_adv_instance()
2269 * uses hdev->random_addr to track its address so in __hci_req_setup_ext_adv_instance()
2271 * random address since hdev->random_addr is shared with in __hci_req_setup_ext_adv_instance()
2292 struct hci_dev *hdev = req->hdev; in __hci_req_enable_ext_advertising()
2301 return -EINVAL; in __hci_req_enable_ext_advertising()
2307 adv_set = (void *) cp->data; in __hci_req_enable_ext_advertising()
2311 cp->enable = 0x01; in __hci_req_enable_ext_advertising()
2312 cp->num_of_sets = 0x01; in __hci_req_enable_ext_advertising()
2316 adv_set->handle = instance; in __hci_req_enable_ext_advertising()
2321 if (adv_instance && adv_instance->duration) { in __hci_req_enable_ext_advertising()
2322 u16 duration = adv_instance->timeout * MSEC_PER_SEC; in __hci_req_enable_ext_advertising()
2324 /* Time = N * 10 ms */ in __hci_req_enable_ext_advertising()
2325 adv_set->duration = cpu_to_le16(duration / 10); in __hci_req_enable_ext_advertising()
2329 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets, in __hci_req_enable_ext_advertising()
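
Worked example of the "Time = N * 10 ms" conversion above: an instance timeout of 5 seconds gives

	duration = 5 * MSEC_PER_SEC = 5000 ms
	adv_set->duration = cpu_to_le16(5000 / 10) = 500	/* N * 10 ms = 5 s */
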
2337 struct hci_dev *hdev = req->hdev; in __hci_req_disable_ext_adv_instance()
2345 return -EINVAL; in __hci_req_disable_ext_adv_instance()
2350 adv_set = (void *)cp->data; in __hci_req_disable_ext_adv_instance()
2353 cp->num_of_sets = !!instance; in __hci_req_disable_ext_adv_instance()
2354 cp->enable = 0x00; in __hci_req_disable_ext_adv_instance()
2356 adv_set->handle = instance; in __hci_req_disable_ext_adv_instance()
2358 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets; in __hci_req_disable_ext_adv_instance()
2366 struct hci_dev *hdev = req->hdev; in __hci_req_remove_ext_adv_instance()
2370 return -EINVAL; in __hci_req_remove_ext_adv_instance()
2379 struct hci_dev *hdev = req->hdev; in __hci_req_start_ext_adv()
2386 if (adv_instance && !adv_instance->pending) in __hci_req_start_ext_adv()
2402 struct hci_dev *hdev = req->hdev; in __hci_req_schedule_adv_instance()
2407 list_empty(&hdev->adv_instances)) in __hci_req_schedule_adv_instance()
2408 return -EPERM; in __hci_req_schedule_adv_instance()
2410 if (hdev->adv_instance_timeout) in __hci_req_schedule_adv_instance()
2411 return -EBUSY; in __hci_req_schedule_adv_instance()
2415 return -ENOENT; in __hci_req_schedule_adv_instance()
2425 if (adv_instance->timeout == 0 || in __hci_req_schedule_adv_instance()
2426 adv_instance->duration <= adv_instance->remaining_time) in __hci_req_schedule_adv_instance()
2427 timeout = adv_instance->duration; in __hci_req_schedule_adv_instance()
2429 timeout = adv_instance->remaining_time; in __hci_req_schedule_adv_instance()
2434 if (adv_instance->timeout) in __hci_req_schedule_adv_instance()
2435 adv_instance->remaining_time = in __hci_req_schedule_adv_instance()
2436 adv_instance->remaining_time - timeout; in __hci_req_schedule_adv_instance()
2440 hdev->adv_instance_timeout = timeout; in __hci_req_schedule_adv_instance()
2441 queue_delayed_work(hdev->req_workqueue, in __hci_req_schedule_adv_instance()
2442 &hdev->adv_instance_expire, in __hci_req_schedule_adv_instance()
2446 /* If we're just re-scheduling the same instance again then do not in __hci_req_schedule_adv_instance()
2450 if (!force && hdev->cur_adv_instance == instance && in __hci_req_schedule_adv_instance()
2454 hdev->cur_adv_instance = instance; in __hci_req_schedule_adv_instance()
2467 * - force == true: The instance will be removed even when its remaining
2469 * - force == false: the instance will be deactivated but kept stored unless
2473 * - force == true: All instances will be removed regardless of their timeout
2475 * - force == false: Only instances that have a timeout will be removed.
2486 if (!instance || hdev->cur_adv_instance == instance) in hci_req_clear_adv_instance()
2493 if (instance && hdev->cur_adv_instance == instance) in hci_req_clear_adv_instance()
2497 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, in hci_req_clear_adv_instance()
2499 if (!(force || adv_instance->timeout)) in hci_req_clear_adv_instance()
2502 rem_inst = adv_instance->instance; in hci_req_clear_adv_instance()
2510 if (force || (adv_instance && adv_instance->timeout && in hci_req_clear_adv_instance()
2511 !adv_instance->remaining_time)) { in hci_req_clear_adv_instance()
2514 next_instance->instance == instance) in hci_req_clear_adv_instance()
2528 __hci_req_schedule_adv_instance(req, next_instance->instance, in hci_req_clear_adv_instance()
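
Hypothetical call shapes matching the force semantics documented at the top of hci_req_clear_adv_instance():

	/* Remove one specific instance even if it still has remaining time: */
	hci_req_clear_adv_instance(hdev, sk, req, instance, true);

	/* Instance 0x00 sweeps all instances; with force == false only those
	 * that have a timeout set are removed:
	 */
	hci_req_clear_adv_instance(hdev, sk, req, 0x00, false);
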
2535 struct hci_dev *hdev = req->hdev; in hci_update_random_address()
2555 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); in hci_update_random_address()
2561 set_random_addr(req, &hdev->rpa); in hci_update_random_address()
2567 * use a non-resolvable private address. This is useful for active in hci_update_random_address()
2568 * scanning and non-connectable advertising. in hci_update_random_address()
2574 /* The non-resolvable private address is generated in hci_update_random_address()
2581 /* The non-resolvable private address shall not be in hci_update_random_address()
2584 if (bacmp(&hdev->bdaddr, &nrpa)) in hci_update_random_address()
2598 * In case BR/EDR has been disabled on a dual-mode controller in hci_update_random_address()
2603 !bacmp(&hdev->bdaddr, BDADDR_ANY) || in hci_update_random_address()
2605 bacmp(&hdev->static_addr, BDADDR_ANY))) { in hci_update_random_address()
2607 if (bacmp(&hdev->static_addr, &hdev->random_addr)) in hci_update_random_address()
2609 &hdev->static_addr); in hci_update_random_address()
2625 list_for_each_entry(b, &hdev->accept_list, list) { in disconnected_accept_list_entries()
2628 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); in disconnected_accept_list_entries()
2632 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) in disconnected_accept_list_entries()
2641 struct hci_dev *hdev = req->hdev; in __hci_req_update_scan()
2653 if (hdev->scanning_paused) in __hci_req_update_scan()
2665 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && in __hci_req_update_scan()
2666 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) in __hci_req_update_scan()
2674 hci_dev_lock(req->hdev); in update_scan()
2676 hci_dev_unlock(req->hdev); in update_scan()
2689 struct hci_dev *hdev = req->hdev; in connectable_update()
2696 * by-product of disabling connectable, we need to update the in connectable_update()
2700 __hci_req_update_adv_data(req, hdev->cur_adv_instance); in connectable_update()
2704 !list_empty(&hdev->adv_instances)) { in connectable_update()
2706 __hci_req_start_ext_adv(req, hdev->cur_adv_instance); in connectable_update()
2733 list_for_each_entry(uuid, &hdev->uuids, list) in get_service_classes()
2734 val |= uuid->svc_hint; in get_service_classes()
2741 struct hci_dev *hdev = req->hdev; in __hci_req_update_class()
2755 cod[0] = hdev->minor_class; in __hci_req_update_class()
2756 cod[1] = hdev->major_class; in __hci_req_update_class()
2762 if (memcmp(cod, hdev->dev_class, 3) == 0) in __hci_req_update_class()
2770 struct hci_dev *hdev = req->hdev; in write_iac()
2778 cp.num_iac = min_t(u8, hdev->num_iac, 2); in write_iac()
2799 struct hci_dev *hdev = req->hdev; in discoverable_update()
2844 switch (conn->state) { in __hci_abort_conn()
2847 if (conn->type == AMP_LINK) { in __hci_abort_conn()
2850 cp.phy_handle = HCI_PHY_HANDLE(conn->handle); in __hci_abort_conn()
2857 dc.handle = cpu_to_le16(conn->handle); in __hci_abort_conn()
2862 conn->state = BT_DISCONN; in __hci_abort_conn()
2866 if (conn->type == LE_LINK) { in __hci_abort_conn()
2867 if (test_bit(HCI_CONN_SCANNING, &conn->flags)) in __hci_abort_conn()
2871 } else if (conn->type == ACL_LINK) { in __hci_abort_conn()
2872 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2) in __hci_abort_conn()
2875 6, &conn->dst); in __hci_abort_conn()
2879 if (conn->type == ACL_LINK) { in __hci_abort_conn()
2882 bacpy(&rej.bdaddr, &conn->dst); in __hci_abort_conn()
2887 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { in __hci_abort_conn()
2890 bacpy(&rej.bdaddr, &conn->dst); in __hci_abort_conn()
2893 * allowed error values (0x0D-0x0F) which isn't in __hci_abort_conn()
2895 * function. To be safe hard-code one of the in __hci_abort_conn()
2905 conn->state = BT_CLOSED; in __hci_abort_conn()
2921 hci_req_init(&req, conn->hdev); in hci_abort_conn()
2926 if (err && err != -ENODATA) { in hci_abort_conn()
2927 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err); in hci_abort_conn()
2936 hci_dev_lock(req->hdev); in update_bg_scan()
2938 hci_dev_unlock(req->hdev); in update_bg_scan()
2976 if (test_bit(HCI_INQUIRY, &req->hdev->flags)) in bredr_inquiry()
2979 bt_dev_dbg(req->hdev, ""); in bredr_inquiry()
2981 hci_dev_lock(req->hdev); in bredr_inquiry()
2982 hci_inquiry_cache_flush(req->hdev); in bredr_inquiry()
2983 hci_dev_unlock(req->hdev); in bredr_inquiry()
2987 if (req->hdev->discovery.limited) in bredr_inquiry()
3010 cancel_delayed_work(&hdev->le_scan_restart); in le_scan_disable_work()
3019 hdev->discovery.scan_start = 0; in le_scan_disable_work()
3029 if (hdev->discovery.type == DISCOV_TYPE_LE) in le_scan_disable_work()
3032 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) in le_scan_disable_work()
3035 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { in le_scan_disable_work()
3036 if (!test_bit(HCI_INQUIRY, &hdev->flags) && in le_scan_disable_work()
3037 hdev->discovery.state != DISCOVERY_RESOLVING) in le_scan_disable_work()
3060 struct hci_dev *hdev = req->hdev; in le_scan_restart()
3066 if (hdev->scanning_paused) { in le_scan_restart()
3112 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || in le_scan_restart_work()
3113 !hdev->discovery.scan_start) in le_scan_restart_work()
3116 /* When the scan was started, hdev->le_scan_disable has been queued in le_scan_restart_work()
3121 duration = hdev->discovery.scan_duration; in le_scan_restart_work()
3122 scan_start = hdev->discovery.scan_start; in le_scan_restart_work()
3124 if (now - scan_start <= duration) { in le_scan_restart_work()
3128 elapsed = now - scan_start; in le_scan_restart_work()
3130 elapsed = ULONG_MAX - scan_start + now; in le_scan_restart_work()
3132 timeout = duration - elapsed; in le_scan_restart_work()
3137 queue_delayed_work(hdev->req_workqueue, in le_scan_restart_work()
3138 &hdev->le_scan_disable, timeout); in le_scan_restart_work()
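
The branch structure above exists to survive a wrapped jiffies counter. A worked example of the wrap case, assuming scan_start was sampled just before the counter rolled over:

	scan_start = ULONG_MAX - 100;	/* just before wrap */
	now        = 50;		/* just after wrap  */
	elapsed    = ULONG_MAX - scan_start + now = 100 + 50 = 150 ticks
	timeout    = duration - elapsed	/* remaining scan time, in jiffies */
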
3146 uint16_t interval = opt; in active_scan() local
3147 struct hci_dev *hdev = req->hdev; in active_scan()
3169 * address (when privacy feature has been enabled) or non-resolvable in active_scan()
3179 * monitor is activated, otherwise AdvMon can only receive one in active_scan()
3194 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, in active_scan()
3195 hdev->le_scan_window_discovery, own_addr_type, in active_scan()
3204 bt_dev_dbg(req->hdev, ""); in interleaved_discov()
3217 bt_dev_dbg(hdev, "type %u", hdev->discovery.type); in start_discovery()
3219 switch (hdev->discovery.type) { in start_discovery()
3236 &hdev->quirks)) { in start_discovery()
3239 * interval. We must leave some time for the controller in start_discovery()
3243 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT, in start_discovery()
3248 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); in start_discovery()
3249 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery, in start_discovery()
3254 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery, in start_discovery()
3265 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout)); in start_discovery()
3272 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && in start_discovery()
3273 hdev->discovery.result_filtering) { in start_discovery()
3274 hdev->discovery.scan_start = jiffies; in start_discovery()
3275 hdev->discovery.scan_duration = timeout; in start_discovery()
3278 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, in start_discovery()
3284 struct hci_dev *hdev = req->hdev; in hci_req_stop_discovery()
3285 struct discovery_state *d = &hdev->discovery; in hci_req_stop_discovery()
3290 bt_dev_dbg(hdev, "state %u", hdev->discovery.state); in hci_req_stop_discovery()
3292 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { in hci_req_stop_discovery()
3293 if (test_bit(HCI_INQUIRY, &hdev->flags)) in hci_req_stop_discovery()
3297 cancel_delayed_work(&hdev->le_scan_disable); in hci_req_stop_discovery()
3298 cancel_delayed_work(&hdev->le_scan_restart); in hci_req_stop_discovery()
3311 /* No further actions needed for LE-only discovery */ in hci_req_stop_discovery()
3312 if (d->type == DISCOV_TYPE_LE) in hci_req_stop_discovery()
3315 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { in hci_req_stop_discovery()
3321 bacpy(&cp.bdaddr, &e->data.bdaddr); in hci_req_stop_discovery()
3332 hci_dev_lock(req->hdev); in stop_discovery()
3334 hci_dev_unlock(req->hdev); in stop_discovery()
3345 switch (hdev->discovery.state) { in discov_update()
3382 hdev->discov_timeout = 0; in discov_off()
3392 struct hci_dev *hdev = req->hdev; in powered_update_hci()
3433 list_empty(&hdev->adv_instances)) { in powered_update_hci()
3455 } else if (!list_empty(&hdev->adv_instances)) { in powered_update_hci()
3458 adv_instance = list_first_entry(&hdev->adv_instances, in powered_update_hci()
3461 adv_instance->instance, in powered_update_hci()
3467 if (link_sec != test_bit(HCI_AUTH, &hdev->flags)) in powered_update_hci()
3501 INIT_WORK(&hdev->discov_update, discov_update); in hci_request_setup()
3502 INIT_WORK(&hdev->bg_scan_update, bg_scan_update); in hci_request_setup()
3503 INIT_WORK(&hdev->scan_update, scan_update_work); in hci_request_setup()
3504 INIT_WORK(&hdev->connectable_update, connectable_update_work); in hci_request_setup()
3505 INIT_WORK(&hdev->discoverable_update, discoverable_update_work); in hci_request_setup()
3506 INIT_DELAYED_WORK(&hdev->discov_off, discov_off); in hci_request_setup()
3507 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); in hci_request_setup()
3508 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work); in hci_request_setup()
3509 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); in hci_request_setup()
3510 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work); in hci_request_setup()
3517 cancel_work_sync(&hdev->discov_update); in hci_request_cancel_all()
3518 cancel_work_sync(&hdev->bg_scan_update); in hci_request_cancel_all()
3519 cancel_work_sync(&hdev->scan_update); in hci_request_cancel_all()
3520 cancel_work_sync(&hdev->connectable_update); in hci_request_cancel_all()
3521 cancel_work_sync(&hdev->discoverable_update); in hci_request_cancel_all()
3522 cancel_delayed_work_sync(&hdev->discov_off); in hci_request_cancel_all()
3523 cancel_delayed_work_sync(&hdev->le_scan_disable); in hci_request_cancel_all()
3524 cancel_delayed_work_sync(&hdev->le_scan_restart); in hci_request_cancel_all()
3526 if (hdev->adv_instance_timeout) { in hci_request_cancel_all()
3527 cancel_delayed_work_sync(&hdev->adv_instance_expire); in hci_request_cancel_all()
3528 hdev->adv_instance_timeout = 0; in hci_request_cancel_all()