Lines Matching refs:qca

283 struct qca_data *qca = hu->priv; in serial_clock_vote() local
286 bool old_vote = (qca->tx_vote | qca->rx_vote); in serial_clock_vote()
291 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); in serial_clock_vote()
294 qca->vote_off_ms += diff; in serial_clock_vote()
296 qca->vote_on_ms += diff; in serial_clock_vote()
300 qca->tx_vote = true; in serial_clock_vote()
301 qca->tx_votes_on++; in serial_clock_vote()
305 qca->rx_vote = true; in serial_clock_vote()
306 qca->rx_votes_on++; in serial_clock_vote()
310 qca->tx_vote = false; in serial_clock_vote()
311 qca->tx_votes_off++; in serial_clock_vote()
315 qca->rx_vote = false; in serial_clock_vote()
316 qca->rx_votes_off++; in serial_clock_vote()
324 new_vote = qca->rx_vote | qca->tx_vote; in serial_clock_vote()
335 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); in serial_clock_vote()
338 qca->votes_on++; in serial_clock_vote()
339 qca->vote_off_ms += diff; in serial_clock_vote()
341 qca->votes_off++; in serial_clock_vote()
342 qca->vote_on_ms += diff; in serial_clock_vote()
344 qca->vote_last_jif = jiffies; in serial_clock_vote()
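
The vote bookkeeping above follows one idiom: when the combined vote (tx_vote | rx_vote) flips, charge the elapsed jiffies to the state that just ended, then restamp vote_last_jif. A minimal sketch reconstructed around the listed fields; everything outside them is assumed:

	bool old_vote = qca->tx_vote | qca->rx_vote;
	bool new_vote;
	unsigned int diff;

	/* ... update qca->tx_vote / qca->rx_vote and the per-direction
	 * tx/rx_votes_on/off counters for the requested vote ...
	 */

	new_vote = qca->rx_vote | qca->tx_vote;
	if (new_vote != old_vote) {
		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
		if (new_vote) {
			qca->votes_on++;
			qca->vote_off_ms += diff;	/* interval that just ended was "off" */
		} else {
			qca->votes_off++;
			qca->vote_on_ms += diff;
		}
		qca->vote_last_jif = jiffies;
	}
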
355 struct qca_data *qca = hu->priv; in send_hci_ibs_cmd() local
368 skb_queue_tail(&qca->txq, skb); in send_hci_ibs_cmd()
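
send_hci_ibs_cmd() evidently only builds a one-byte packet and parks it on txq for the UART write path. A plausible shape, with the allocation and error handling reconstructed rather than taken verbatim from the driver:

	struct sk_buff *skb;

	skb = bt_skb_alloc(1, GFP_ATOMIC);	/* single IBS opcode byte */
	if (!skb)
		return -ENOMEM;

	skb_put_u8(skb, cmd);

	skb_queue_tail(&qca->txq, skb);		/* drained later via qca_dequeue() */
	return 0;
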
375 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_awake_device() local
377 struct hci_uart *hu = qca->hu; in qca_wq_awake_device()
386 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_wq_awake_device()
392 qca->ibs_sent_wakes++; in qca_wq_awake_device()
395 retrans_delay = msecs_to_jiffies(qca->wake_retrans); in qca_wq_awake_device()
396 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); in qca_wq_awake_device()
398 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_wq_awake_device()
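
The awake-device work pairs a WAKE indication with a retransmit timer, so an unacknowledged WAKE is re-sent every wake_retrans ms. Sketch around the listed lines; the opcode name and error handling are assumptions:

	unsigned long retrans_delay;
	unsigned long flags;

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)	/* assumed opcode name */
		BT_ERR("Failed to send WAKE to device");
	qca->ibs_sent_wakes++;

	/* Re-send WAKE until the peer acks; see hci_ibs_wake_retrans_timeout(). */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
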
406 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_awake_rx() local
408 struct hci_uart *hu = qca->hu; in qca_wq_awake_rx()
415 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_wq_awake_rx()
416 qca->rx_ibs_state = HCI_IBS_RX_AWAKE; in qca_wq_awake_rx()
424 qca->ibs_sent_wacks++; in qca_wq_awake_rx()
426 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_wq_awake_rx()
434 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_serial_rx_clock_vote_off() local
436 struct hci_uart *hu = qca->hu; in qca_wq_serial_rx_clock_vote_off()
445 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_serial_tx_clock_vote_off() local
447 struct hci_uart *hu = qca->hu; in qca_wq_serial_tx_clock_vote_off()
462 struct qca_data *qca = from_timer(qca, t, tx_idle_timer); in hci_ibs_tx_idle_timeout() local
463 struct hci_uart *hu = qca->hu; in hci_ibs_tx_idle_timeout()
466 BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state); in hci_ibs_tx_idle_timeout()
468 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in hci_ibs_tx_idle_timeout()
471 switch (qca->tx_ibs_state) { in hci_ibs_tx_idle_timeout()
478 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in hci_ibs_tx_idle_timeout()
479 qca->ibs_sent_slps++; in hci_ibs_tx_idle_timeout()
480 queue_work(qca->workqueue, &qca->ws_tx_vote_off); in hci_ibs_tx_idle_timeout()
486 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); in hci_ibs_tx_idle_timeout()
490 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_tx_idle_timeout()
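
The idle-timeout handler is the standard from_timer() pattern: recover qca from the embedded timer_list, take the IBS lock with a lockdep subclass (the listing shows spin_lock_irqsave_nested), and act only in the state the timer was armed for. Sketch; the SLEEP opcode name is an assumption:

	struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
	struct hci_uart *hu = qca->hu;
	unsigned long flags;

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* TX went idle: tell the peer we sleep, vote the clock off. */
		send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu);	/* assumed opcode name */
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
		break;
	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
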
495 struct qca_data *qca = from_timer(qca, t, wake_retrans_timer); in hci_ibs_wake_retrans_timeout() local
496 struct hci_uart *hu = qca->hu; in hci_ibs_wake_retrans_timeout()
501 hu, qca->tx_ibs_state); in hci_ibs_wake_retrans_timeout()
503 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in hci_ibs_wake_retrans_timeout()
507 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in hci_ibs_wake_retrans_timeout()
508 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_wake_retrans_timeout()
512 switch (qca->tx_ibs_state) { in hci_ibs_wake_retrans_timeout()
520 qca->ibs_sent_wakes++; in hci_ibs_wake_retrans_timeout()
521 retrans_delay = msecs_to_jiffies(qca->wake_retrans); in hci_ibs_wake_retrans_timeout()
522 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); in hci_ibs_wake_retrans_timeout()
528 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); in hci_ibs_wake_retrans_timeout()
532 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_wake_retrans_timeout()
541 struct qca_data *qca = container_of(work, struct qca_data, in qca_controller_memdump_timeout() local
543 struct hci_uart *hu = qca->hu; in qca_controller_memdump_timeout()
545 mutex_lock(&qca->hci_memdump_lock); in qca_controller_memdump_timeout()
546 if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) { in qca_controller_memdump_timeout()
547 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_controller_memdump_timeout()
548 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { in qca_controller_memdump_timeout()
556 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump_timeout()
564 struct qca_data *qca; in qca_open() local
571 qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); in qca_open()
572 if (!qca) in qca_open()
575 skb_queue_head_init(&qca->txq); in qca_open()
576 skb_queue_head_init(&qca->tx_wait_q); in qca_open()
577 skb_queue_head_init(&qca->rx_memdump_q); in qca_open()
578 spin_lock_init(&qca->hci_ibs_lock); in qca_open()
579 mutex_init(&qca->hci_memdump_lock); in qca_open()
580 qca->workqueue = alloc_ordered_workqueue("qca_wq", 0); in qca_open()
581 if (!qca->workqueue) { in qca_open()
583 kfree(qca); in qca_open()
587 INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx); in qca_open()
588 INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device); in qca_open()
589 INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off); in qca_open()
590 INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off); in qca_open()
591 INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump); in qca_open()
592 INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout, in qca_open()
594 init_waitqueue_head(&qca->suspend_wait_q); in qca_open()
596 qca->hu = hu; in qca_open()
597 init_completion(&qca->drop_ev_comp); in qca_open()
600 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in qca_open()
601 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; in qca_open()
603 qca->vote_last_jif = jiffies; in qca_open()
605 hu->priv = qca; in qca_open()
627 timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0); in qca_open()
628 qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS; in qca_open()
630 timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0); in qca_open()
631 qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS; in qca_open()
634 qca->tx_idle_delay, qca->wake_retrans); in qca_open()
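
qca_open() initialises in dependency order: skb queues and locks first, then the ordered workqueue and the work items that run on it, then the IBS start states and hu->priv, with the timers set up last. A condensed sketch (most INIT_WORK lines omitted):

	qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
	if (!qca)
		return -ENOMEM;

	skb_queue_head_init(&qca->txq);
	skb_queue_head_init(&qca->tx_wait_q);
	spin_lock_init(&qca->hci_ibs_lock);

	/* Ordered: the IBS wake/sleep/vote works must not run concurrently. */
	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
	if (!qca->workqueue) {
		kfree(qca);
		return -ENOMEM;
	}

	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
	/* ... remaining INIT_WORK / INIT_DELAYED_WORK calls ... */

	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
	hu->priv = qca;

	timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
	qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
	return 0;
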
642 struct qca_data *qca = hu->priv; in qca_debugfs_init() local
649 if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags)) in qca_debugfs_init()
656 debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state); in qca_debugfs_init()
657 debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state); in qca_debugfs_init()
659 &qca->ibs_sent_slps); in qca_debugfs_init()
661 &qca->ibs_sent_wakes); in qca_debugfs_init()
663 &qca->ibs_sent_wacks); in qca_debugfs_init()
665 &qca->ibs_recv_slps); in qca_debugfs_init()
667 &qca->ibs_recv_wakes); in qca_debugfs_init()
669 &qca->ibs_recv_wacks); in qca_debugfs_init()
670 debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote); in qca_debugfs_init()
671 debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on); in qca_debugfs_init()
672 debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off); in qca_debugfs_init()
673 debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote); in qca_debugfs_init()
674 debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on); in qca_debugfs_init()
675 debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off); in qca_debugfs_init()
676 debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on); in qca_debugfs_init()
677 debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off); in qca_debugfs_init()
678 debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms); in qca_debugfs_init()
679 debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms); in qca_debugfs_init()
683 debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans); in qca_debugfs_init()
685 &qca->tx_idle_delay); in qca_debugfs_init()
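
The debugfs block is the usual pattern: a test_and_set_bit() guard so the entries are created only once per adapter, one directory, then one debugfs_create_*() per counter, read-only (0444) for statistics and writable (0644) for tunables. Abridged sketch; the directory name is an assumption:

	struct dentry *ibs_dir;
	umode_t mode;

	if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags))
		return;		/* already created for this hdev */

	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);	/* assumed name */

	mode = 0444;		/* statistics: read-only */
	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);

	mode = 0644;		/* tunables: writable */
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
	debugfs_create_u32("tx_idle_delay", mode, ibs_dir, &qca->tx_idle_delay);
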
691 struct qca_data *qca = hu->priv; in qca_flush() local
695 skb_queue_purge(&qca->tx_wait_q); in qca_flush()
696 skb_queue_purge(&qca->txq); in qca_flush()
704 struct qca_data *qca = hu->priv; in qca_close() local
710 skb_queue_purge(&qca->tx_wait_q); in qca_close()
711 skb_queue_purge(&qca->txq); in qca_close()
712 skb_queue_purge(&qca->rx_memdump_q); in qca_close()
719 timer_shutdown_sync(&qca->tx_idle_timer); in qca_close()
720 timer_shutdown_sync(&qca->wake_retrans_timer); in qca_close()
721 destroy_workqueue(qca->workqueue); in qca_close()
722 qca->hu = NULL; in qca_close()
724 kfree_skb(qca->rx_skb); in qca_close()
728 kfree(qca); in qca_close()
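
qca_close() tears down in reverse, and the order matters: purge the queues, shut the timers down permanently (after timer_shutdown_sync() they can never be re-armed), destroy the workqueue so no work item can touch freed state, and only then free. Sketch:

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);
	skb_queue_purge(&qca->rx_memdump_q);

	timer_shutdown_sync(&qca->tx_idle_timer);
	timer_shutdown_sync(&qca->wake_retrans_timer);

	destroy_workqueue(qca->workqueue);	/* waits for pending work */
	qca->hu = NULL;

	kfree_skb(qca->rx_skb);			/* NULL-safe */
	kfree(qca);
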
738 struct qca_data *qca = hu->priv; in device_want_to_wakeup() local
742 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
744 qca->ibs_recv_wakes++; in device_want_to_wakeup()
747 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in device_want_to_wakeup()
748 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
752 switch (qca->rx_ibs_state) { in device_want_to_wakeup()
757 queue_work(qca->workqueue, &qca->ws_awake_rx); in device_want_to_wakeup()
758 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
769 qca->ibs_sent_wacks++; in device_want_to_wakeup()
775 qca->rx_ibs_state); in device_want_to_wakeup()
779 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
790 struct qca_data *qca = hu->priv; in device_want_to_sleep() local
792 BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state); in device_want_to_sleep()
794 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_want_to_sleep()
796 qca->ibs_recv_slps++; in device_want_to_sleep()
798 switch (qca->rx_ibs_state) { in device_want_to_sleep()
801 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; in device_want_to_sleep()
803 queue_work(qca->workqueue, &qca->ws_rx_vote_off); in device_want_to_sleep()
812 qca->rx_ibs_state); in device_want_to_sleep()
816 wake_up_interruptible(&qca->suspend_wait_q); in device_want_to_sleep()
818 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_sleep()
826 struct qca_data *qca = hu->priv; in device_woke_up() local
831 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_woke_up()
833 qca->ibs_recv_wacks++; in device_woke_up()
836 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in device_woke_up()
837 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_woke_up()
841 switch (qca->tx_ibs_state) { in device_woke_up()
845 qca->tx_ibs_state); in device_woke_up()
850 while ((skb = skb_dequeue(&qca->tx_wait_q))) in device_woke_up()
851 skb_queue_tail(&qca->txq, skb); in device_woke_up()
854 del_timer(&qca->wake_retrans_timer); in device_woke_up()
855 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); in device_woke_up()
856 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); in device_woke_up()
857 qca->tx_ibs_state = HCI_IBS_TX_AWAKE; in device_woke_up()
863 qca->tx_ibs_state); in device_woke_up()
867 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_woke_up()
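
On the wake ack (device_woke_up), everything parked on tx_wait_q while the peer slept moves onto the live txq, the WAKE retransmit timer is stopped, and the idle timer is re-armed. Sketch of that core:

	struct sk_buff *skb;
	unsigned long idle_delay;

	while ((skb = skb_dequeue(&qca->tx_wait_q)))
		skb_queue_tail(&qca->txq, skb);

	del_timer(&qca->wake_retrans_timer);	/* no further WAKE re-sends */
	idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
	mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
	qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
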
879 struct qca_data *qca = hu->priv; in qca_enqueue() local
882 qca->tx_ibs_state); in qca_enqueue()
884 if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { in qca_enqueue()
894 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_enqueue()
900 if (test_bit(QCA_IBS_DISABLED, &qca->flags) || in qca_enqueue()
901 test_bit(QCA_SUSPENDING, &qca->flags)) { in qca_enqueue()
902 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
903 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_enqueue()
908 switch (qca->tx_ibs_state) { in qca_enqueue()
911 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
912 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); in qca_enqueue()
913 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); in qca_enqueue()
919 skb_queue_tail(&qca->tx_wait_q, skb); in qca_enqueue()
921 qca->tx_ibs_state = HCI_IBS_TX_WAKING; in qca_enqueue()
923 queue_work(qca->workqueue, &qca->ws_awake_device); in qca_enqueue()
929 skb_queue_tail(&qca->tx_wait_q, skb); in qca_enqueue()
934 qca->tx_ibs_state); in qca_enqueue()
939 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_enqueue()
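
qca_enqueue() is the TX state machine in miniature: AWAKE queues directly and kicks the idle timer, ASLEEP parks the skb and starts the wake handshake, WAKING just parks because a handshake is already in flight. Sketch of the switch:

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		skb_queue_tail(&qca->txq, skb);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		skb_queue_tail(&qca->tx_wait_q, skb);	/* hold until peer wakes */
		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
		queue_work(qca->workqueue, &qca->ws_awake_device);
		break;

	case HCI_IBS_TX_WAKING:
		skb_queue_tail(&qca->tx_wait_q, skb);	/* handshake in flight */
		break;

	default:
		BT_ERR("Illegal tx state: %d (losing packet)", qca->tx_ibs_state);
		dev_kfree_skb_irq(skb);		/* we hold the irqsave lock */
		break;
	}
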
996 struct qca_data *qca = hu->priv; in qca_dmp_hdr() local
1000 qca->controller_id); in qca_dmp_hdr()
1004 qca->fw_version); in qca_dmp_hdr()
1017 struct qca_data *qca = container_of(work, struct qca_data, in qca_controller_memdump() local
1019 struct hci_uart *hu = qca->hu; in qca_controller_memdump()
1022 struct qca_memdump_info *qca_memdump = qca->qca_memdump; in qca_controller_memdump()
1029 while ((skb = skb_dequeue(&qca->rx_memdump_q))) { in qca_controller_memdump()
1031 mutex_lock(&qca->hci_memdump_lock); in qca_controller_memdump()
1035 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || in qca_controller_memdump()
1036 qca->memdump_state == QCA_MEMDUMP_COLLECTED) { in qca_controller_memdump()
1037 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1045 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1049 qca->qca_memdump = qca_memdump; in qca_controller_memdump()
1052 qca->memdump_state = QCA_MEMDUMP_COLLECTING; in qca_controller_memdump()
1065 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_controller_memdump()
1066 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1073 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1077 queue_delayed_work(qca->workqueue, in qca_controller_memdump()
1078 &qca->ctrl_memdump_timeout, in qca_controller_memdump()
1087 kfree(qca->qca_memdump); in qca_controller_memdump()
1088 qca->qca_memdump = NULL; in qca_controller_memdump()
1089 qca->memdump_state = QCA_MEMDUMP_COLLECTED; in qca_controller_memdump()
1090 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_controller_memdump()
1091 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1092 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1104 if (!test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) { in qca_controller_memdump()
1108 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1164 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_controller_memdump()
1165 kfree(qca->qca_memdump); in qca_controller_memdump()
1166 qca->qca_memdump = NULL; in qca_controller_memdump()
1167 qca->memdump_state = QCA_MEMDUMP_COLLECTED; in qca_controller_memdump()
1168 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1171 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
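
The memdump worker drains rx_memdump_q one fragment at a time and re-takes hci_memdump_lock per fragment, so the timeout path can slip in between fragments and end the collection. Skeleton of the loop (freeing the dropped fragment is this sketch's choice, not verbatim driver code):

	while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
		mutex_lock(&qca->hci_memdump_lock);

		/* A timeout or a completed dump ends collection immediately. */
		if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
		    qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
			mutex_unlock(&qca->hci_memdump_lock);
			kfree_skb(skb);
			return;
		}

		/* ... append this fragment to the dump buffer ... */

		mutex_unlock(&qca->hci_memdump_lock);
	}
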
1180 struct qca_data *qca = hu->priv; in qca_controller_memdump_event() local
1182 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_controller_memdump_event()
1183 skb_queue_tail(&qca->rx_memdump_q, skb); in qca_controller_memdump_event()
1184 queue_work(qca->workqueue, &qca->ctrl_memdump_evt); in qca_controller_memdump_event()
1192 struct qca_data *qca = hu->priv; in qca_recv_event() local
1194 if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) { in qca_recv_event()
1208 complete(&qca->drop_ev_comp); in qca_recv_event()
1258 struct qca_data *qca = hu->priv; in qca_recv() local
1263 qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count, in qca_recv()
1265 if (IS_ERR(qca->rx_skb)) { in qca_recv()
1266 int err = PTR_ERR(qca->rx_skb); in qca_recv()
1268 qca->rx_skb = NULL; in qca_recv()
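
qca_recv() shows the h4_recv_buf() contract: it hands back either the (possibly still partial) reassembly skb or an ERR_PTR, and the caller must never leave an ERR_PTR in qca->rx_skb. Sketch; the packet-descriptor table name is an assumption:

	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
	if (IS_ERR(qca->rx_skb)) {
		int err = PTR_ERR(qca->rx_skb);

		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
		qca->rx_skb = NULL;	/* reset, do not keep the ERR_PTR */
		return err;
	}

	return count;
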
1277 struct qca_data *qca = hu->priv; in qca_dequeue() local
1279 return skb_dequeue(&qca->txq); in qca_dequeue()
1321 struct qca_data *qca = hu->priv; in qca_set_baudrate() local
1340 skb_queue_tail(&qca->txq, skb); in qca_set_baudrate()
1345 while (!skb_queue_empty(&qca->txq)) in qca_set_baudrate()
1464 struct qca_data *qca = hu->priv; in qca_set_speed() local
1498 reinit_completion(&qca->drop_ev_comp); in qca_set_speed()
1499 set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); in qca_set_speed()
1535 if (!wait_for_completion_timeout(&qca->drop_ev_comp, in qca_set_speed()
1542 clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); in qca_set_speed()
1555 struct qca_data *qca = hu->priv; in qca_send_crashbuffer() local
1572 skb_queue_tail(&qca->txq, skb); in qca_send_crashbuffer()
1581 struct qca_data *qca = hu->priv; in qca_wait_for_dump_collection() local
1583 wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION, in qca_wait_for_dump_collection()
1586 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_wait_for_dump_collection()
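
Dump collection is synchronised on a flag bit rather than a completion: the collector clears QCA_MEMDUMP_COLLECTION when done and waiters block on the bit. Sketch; the timeout constant and its unit conversion are assumptions:

	wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
			    TASK_UNINTERRUPTIBLE,
			    msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));
	clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
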
1592 struct qca_data *qca = hu->priv; in qca_hw_error() local
1594 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_hw_error()
1595 set_bit(QCA_HW_ERROR_EVENT, &qca->flags); in qca_hw_error()
1596 bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state); in qca_hw_error()
1598 if (qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_hw_error()
1605 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_hw_error()
1608 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { in qca_hw_error()
1616 mutex_lock(&qca->hci_memdump_lock); in qca_hw_error()
1617 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { in qca_hw_error()
1620 if (qca->qca_memdump) { in qca_hw_error()
1621 kfree(qca->qca_memdump); in qca_hw_error()
1622 qca->qca_memdump = NULL; in qca_hw_error()
1624 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_hw_error()
1625 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_hw_error()
1627 mutex_unlock(&qca->hci_memdump_lock); in qca_hw_error()
1629 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || in qca_hw_error()
1630 qca->memdump_state == QCA_MEMDUMP_COLLECTED) { in qca_hw_error()
1631 cancel_work_sync(&qca->ctrl_memdump_evt); in qca_hw_error()
1632 skb_queue_purge(&qca->rx_memdump_q); in qca_hw_error()
1635 clear_bit(QCA_HW_ERROR_EVENT, &qca->flags); in qca_hw_error()
1641 struct qca_data *qca = hu->priv; in qca_cmd_timeout() local
1643 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_cmd_timeout()
1644 if (qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_cmd_timeout()
1645 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_cmd_timeout()
1648 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { in qca_cmd_timeout()
1656 mutex_lock(&qca->hci_memdump_lock); in qca_cmd_timeout()
1657 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { in qca_cmd_timeout()
1658 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_cmd_timeout()
1659 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { in qca_cmd_timeout()
1666 mutex_unlock(&qca->hci_memdump_lock); in qca_cmd_timeout()
1774 struct qca_data *qca = hu->priv; in qca_power_on() local
1803 clear_bit(QCA_BT_OFF, &qca->flags); in qca_power_on()
1821 struct qca_data *qca = hu->priv; in qca_setup() local
1834 clear_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
1836 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_setup()
1868 qca->memdump_state = QCA_MEMDUMP_IDLE; in qca_setup()
1875 clear_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_setup()
1928 clear_bit(QCA_IBS_DISABLED, &qca->flags); in qca_setup()
1936 set_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
1943 set_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
1968 qca->fw_version = le16_to_cpu(ver.patch_ver); in qca_setup()
1969 qca->controller_id = le16_to_cpu(ver.rom_ver); in qca_setup()
2088 struct qca_data *qca = hu->priv; in qca_power_shutdown() local
2097 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_power_shutdown()
2098 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_power_shutdown()
2100 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_power_shutdown()
2135 set_bit(QCA_BT_OFF, &qca->flags); in qca_power_shutdown()
2141 struct qca_data *qca = hu->priv; in qca_power_off() local
2147 del_timer_sync(&qca->wake_retrans_timer); in qca_power_off()
2148 del_timer_sync(&qca->tx_idle_timer); in qca_power_off()
2152 && qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_power_off()
2204 static int qca_init_regulators(struct qca_power *qca, in qca_init_regulators() argument
2211 bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL); in qca_init_regulators()
2218 ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk); in qca_init_regulators()
2228 qca->vreg_bulk = bulk; in qca_init_regulators()
2229 qca->num_vregs = num_vregs; in qca_init_regulators()
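
qca_init_regulators() is the standard devm bulk-regulator pattern: one devm_kcalloc'd array, supply names filled in, a single devm_regulator_bulk_get(), and no manual unwinding on error because devm owns the allocations. Sketch; the vregs descriptor field is an assumption:

	struct regulator_bulk_data *bulk;
	int ret, i;

	bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	for (i = 0; i < num_vregs; i++)
		bulk[i].supply = vregs[i].name;	/* assumed descriptor field */

	ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
	if (ret < 0)
		return ret;		/* devm frees bulk with the device */

	qca->vreg_bulk = bulk;
	qca->num_vregs = num_vregs;
	return 0;
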
2401 struct qca_data *qca = hu->priv; in qca_serdev_shutdown() local
2406 if (test_bit(QCA_BT_OFF, &qca->flags) || in qca_serdev_shutdown()
2437 struct qca_data *qca = hu->priv; in qca_suspend() local
2444 set_bit(QCA_SUSPENDING, &qca->flags); in qca_suspend()
2449 if (test_bit(QCA_ROM_FW, &qca->flags)) in qca_suspend()
2456 if (test_bit(QCA_BT_OFF, &qca->flags) && in qca_suspend()
2457 !test_bit(QCA_SSR_TRIGGERED, &qca->flags)) in qca_suspend()
2460 if (test_bit(QCA_IBS_DISABLED, &qca->flags) || in qca_suspend()
2461 test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { in qca_suspend()
2462 wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ? in qca_suspend()
2470 wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED, in qca_suspend()
2473 if (test_bit(QCA_IBS_DISABLED, &qca->flags)) { in qca_suspend()
2480 cancel_work_sync(&qca->ws_awake_device); in qca_suspend()
2481 cancel_work_sync(&qca->ws_awake_rx); in qca_suspend()
2483 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in qca_suspend()
2486 switch (qca->tx_ibs_state) { in qca_suspend()
2488 del_timer(&qca->wake_retrans_timer); in qca_suspend()
2491 del_timer(&qca->tx_idle_timer); in qca_suspend()
2502 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in qca_suspend()
2503 qca->ibs_sent_slps++; in qca_suspend()
2511 BT_ERR("Spurious tx state %d", qca->tx_ibs_state); in qca_suspend()
2516 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_suspend()
2530 ret = wait_event_interruptible_timeout(qca->suspend_wait_q, in qca_suspend()
2531 qca->rx_ibs_state == HCI_IBS_RX_ASLEEP, in qca_suspend()
2541 clear_bit(QCA_SUSPENDING, &qca->flags); in qca_suspend()
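
Suspend waits for the peer to finish its half of the handshake: with TX forced asleep, it blocks on suspend_wait_q until rx_ibs_state reaches HCI_IBS_RX_ASLEEP; device_want_to_sleep() performs the matching wake_up_interruptible(). Sketch; the timeout constant is an assumption:

	ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
			qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
			msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
	if (ret > 0)
		return 0;		/* RX asleep in time */

	return ret ? ret : -ETIMEDOUT;	/* interrupted, or timed out */
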
2551 struct qca_data *qca = hu->priv; in qca_resume() local
2553 clear_bit(QCA_SUSPENDING, &qca->flags); in qca_resume()