Lines matching refs: qca
281 struct qca_data *qca = hu->priv; in serial_clock_vote() local
284 bool old_vote = (qca->tx_vote | qca->rx_vote); in serial_clock_vote()
289 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); in serial_clock_vote()
292 qca->vote_off_ms += diff; in serial_clock_vote()
294 qca->vote_on_ms += diff; in serial_clock_vote()
298 qca->tx_vote = true; in serial_clock_vote()
299 qca->tx_votes_on++; in serial_clock_vote()
303 qca->rx_vote = true; in serial_clock_vote()
304 qca->rx_votes_on++; in serial_clock_vote()
308 qca->tx_vote = false; in serial_clock_vote()
309 qca->tx_votes_off++; in serial_clock_vote()
313 qca->rx_vote = false; in serial_clock_vote()
314 qca->rx_votes_off++; in serial_clock_vote()
322 new_vote = qca->rx_vote | qca->tx_vote; in serial_clock_vote()
333 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); in serial_clock_vote()
336 qca->votes_on++; in serial_clock_vote()
337 qca->vote_off_ms += diff; in serial_clock_vote()
339 qca->votes_off++; in serial_clock_vote()
340 qca->vote_on_ms += diff; in serial_clock_vote()
342 qca->vote_last_jif = jiffies; in serial_clock_vote()
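The serial_clock_vote() fragments above show the driver keeping separate TX and RX "votes" for the UART clock and accumulating how long the combined vote has been on or off. Below is a minimal sketch of that bookkeeping, using a trimmed-down stand-in for the vote-related fields of struct qca_data; field names follow the fragments, while the vote-type constants and the surrounding locking are omitted.

#include <linux/jiffies.h>
#include <linux/types.h>

/* Trimmed-down stand-in for the vote-related fields of struct qca_data. */
struct qca_vote_stats {
	bool tx_vote, rx_vote;
	u64 votes_on, votes_off;
	u32 vote_on_ms, vote_off_ms;
	unsigned long vote_last_jif;
};

/* Account for a combined vote transition: charge the elapsed interval to the
 * previous state, bump the on/off counter, and restart the interval. */
static void vote_accounting(struct qca_vote_stats *v, bool old_vote, bool new_vote)
{
	unsigned int diff = jiffies_to_msecs(jiffies - v->vote_last_jif);

	if (old_vote == new_vote)
		return;

	if (new_vote) {
		v->votes_on++;
		v->vote_off_ms += diff;	/* time spent with the clock released */
	} else {
		v->votes_off++;
		v->vote_on_ms += diff;	/* time spent with the clock held */
	}
	v->vote_last_jif = jiffies;
}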
353 struct qca_data *qca = hu->priv; in send_hci_ibs_cmd() local
366 skb_queue_tail(&qca->txq, skb); in send_hci_ibs_cmd()
373 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_awake_device() local
375 struct hci_uart *hu = qca->hu; in qca_wq_awake_device()
384 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_wq_awake_device()
390 qca->ibs_sent_wakes++; in qca_wq_awake_device()
393 retrans_delay = msecs_to_jiffies(qca->wake_retrans); in qca_wq_awake_device()
394 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); in qca_wq_awake_device()
396 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_wq_awake_device()
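qca_wq_awake_device() runs from the ordered workqueue; the visible lines show it taking hci_ibs_lock, counting a sent wake, and arming wake_retrans_timer so the wake indication is repeated if the controller does not acknowledge it. A condensed sketch of that pattern follows, assuming the struct qca_data fields shown in the listing; send_wake_ind() is a hypothetical placeholder for the actual byte transmission, which the fragments do not show.

/* Sketch of the wake-then-rearm pattern in qca_wq_awake_device().
 * send_wake_ind() is a hypothetical helper standing in for queuing the
 * HCI_IBS_WAKE_IND byte; vote handling is simplified. */
static void awake_device_sketch(struct qca_data *qca, struct hci_uart *hu)
{
	unsigned long flags, retrans_delay;

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	send_wake_ind(hu);			/* hypothetical helper */
	qca->ibs_sent_wakes++;

	/* Re-send the wake indication if no ack arrives in time. */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}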
404 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_awake_rx() local
406 struct hci_uart *hu = qca->hu; in qca_wq_awake_rx()
413 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_wq_awake_rx()
414 qca->rx_ibs_state = HCI_IBS_RX_AWAKE; in qca_wq_awake_rx()
422 qca->ibs_sent_wacks++; in qca_wq_awake_rx()
424 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_wq_awake_rx()
432 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_serial_rx_clock_vote_off() local
434 struct hci_uart *hu = qca->hu; in qca_wq_serial_rx_clock_vote_off()
443 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_serial_tx_clock_vote_off() local
445 struct hci_uart *hu = qca->hu; in qca_wq_serial_tx_clock_vote_off()
460 struct qca_data *qca = from_timer(qca, t, tx_idle_timer); in hci_ibs_tx_idle_timeout() local
461 struct hci_uart *hu = qca->hu; in hci_ibs_tx_idle_timeout()
464 BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state); in hci_ibs_tx_idle_timeout()
466 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in hci_ibs_tx_idle_timeout()
469 switch (qca->tx_ibs_state) { in hci_ibs_tx_idle_timeout()
476 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in hci_ibs_tx_idle_timeout()
477 qca->ibs_sent_slps++; in hci_ibs_tx_idle_timeout()
478 queue_work(qca->workqueue, &qca->ws_tx_vote_off); in hci_ibs_tx_idle_timeout()
484 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); in hci_ibs_tx_idle_timeout()
488 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_tx_idle_timeout()
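The tx-idle timer fragments describe the intended flow: once the TX side has been idle long enough, the state moves from awake to asleep, a sent sleep indication is counted, and the TX clock vote is dropped via ws_tx_vote_off from process context. A condensed sketch, assuming the fields visible above; the HCI_IBS_SLEEP_IND transmission itself is not shown in the fragments and is represented by a hypothetical send_sleep_ind() helper.

/* Sketch of the idle-timeout path in hci_ibs_tx_idle_timeout(). */
static void tx_idle_timeout_sketch(struct qca_data *qca, struct hci_uart *hu)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&qca->hci_ibs_lock, flags,
				 SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		send_sleep_ind(hu);		/* hypothetical helper */
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		/* Drop the TX clock vote from process context. */
		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
		break;
	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}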
493 struct qca_data *qca = from_timer(qca, t, wake_retrans_timer); in hci_ibs_wake_retrans_timeout() local
494 struct hci_uart *hu = qca->hu; in hci_ibs_wake_retrans_timeout()
499 hu, qca->tx_ibs_state); in hci_ibs_wake_retrans_timeout()
501 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in hci_ibs_wake_retrans_timeout()
505 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in hci_ibs_wake_retrans_timeout()
506 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_wake_retrans_timeout()
510 switch (qca->tx_ibs_state) { in hci_ibs_wake_retrans_timeout()
518 qca->ibs_sent_wakes++; in hci_ibs_wake_retrans_timeout()
519 retrans_delay = msecs_to_jiffies(qca->wake_retrans); in hci_ibs_wake_retrans_timeout()
520 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); in hci_ibs_wake_retrans_timeout()
526 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); in hci_ibs_wake_retrans_timeout()
530 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_wake_retrans_timeout()
539 struct qca_data *qca = container_of(work, struct qca_data, in qca_controller_memdump_timeout() local
541 struct hci_uart *hu = qca->hu; in qca_controller_memdump_timeout()
543 mutex_lock(&qca->hci_memdump_lock); in qca_controller_memdump_timeout()
544 if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) { in qca_controller_memdump_timeout()
545 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_controller_memdump_timeout()
546 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { in qca_controller_memdump_timeout()
554 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump_timeout()
562 struct qca_data *qca; in qca_open() local
569 qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); in qca_open()
570 if (!qca) in qca_open()
573 skb_queue_head_init(&qca->txq); in qca_open()
574 skb_queue_head_init(&qca->tx_wait_q); in qca_open()
575 skb_queue_head_init(&qca->rx_memdump_q); in qca_open()
576 spin_lock_init(&qca->hci_ibs_lock); in qca_open()
577 mutex_init(&qca->hci_memdump_lock); in qca_open()
578 qca->workqueue = alloc_ordered_workqueue("qca_wq", 0); in qca_open()
579 if (!qca->workqueue) { in qca_open()
581 kfree(qca); in qca_open()
585 INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx); in qca_open()
586 INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device); in qca_open()
587 INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off); in qca_open()
588 INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off); in qca_open()
589 INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump); in qca_open()
590 INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout, in qca_open()
592 init_waitqueue_head(&qca->suspend_wait_q); in qca_open()
594 qca->hu = hu; in qca_open()
595 init_completion(&qca->drop_ev_comp); in qca_open()
598 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in qca_open()
599 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; in qca_open()
601 qca->vote_last_jif = jiffies; in qca_open()
603 hu->priv = qca; in qca_open()
616 timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0); in qca_open()
617 qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS; in qca_open()
619 timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0); in qca_open()
620 qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS; in qca_open()
623 qca->tx_idle_delay, qca->wake_retrans); in qca_open()
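Taken together, the qca_open() fragments give the initialization order: allocate the private data, set up the sk_buff queues and locks, create the ordered workqueue and its work items, record the hu back-pointer, start both IBS directions asleep, and only then arm the two timers with their default intervals. A condensed sketch of that order; error handling beyond the two allocations and the memdump/suspend pieces are trimmed.

static int qca_open_sketch(struct hci_uart *hu)
{
	struct qca_data *qca;

	qca = kzalloc(sizeof(*qca), GFP_KERNEL);
	if (!qca)
		return -ENOMEM;

	skb_queue_head_init(&qca->txq);
	skb_queue_head_init(&qca->tx_wait_q);
	spin_lock_init(&qca->hci_ibs_lock);

	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
	if (!qca->workqueue) {
		kfree(qca);
		return -ENOMEM;
	}

	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);

	qca->hu = hu;

	/* Both directions start asleep; traffic wakes them on demand. */
	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
	qca->vote_last_jif = jiffies;

	hu->priv = qca;

	timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
	timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
	qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;

	return 0;
}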
631 struct qca_data *qca = hu->priv; in qca_debugfs_init() local
642 debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state); in qca_debugfs_init()
643 debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state); in qca_debugfs_init()
645 &qca->ibs_sent_slps); in qca_debugfs_init()
647 &qca->ibs_sent_wakes); in qca_debugfs_init()
649 &qca->ibs_sent_wacks); in qca_debugfs_init()
651 &qca->ibs_recv_slps); in qca_debugfs_init()
653 &qca->ibs_recv_wakes); in qca_debugfs_init()
655 &qca->ibs_recv_wacks); in qca_debugfs_init()
656 debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote); in qca_debugfs_init()
657 debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on); in qca_debugfs_init()
658 debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off); in qca_debugfs_init()
659 debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote); in qca_debugfs_init()
660 debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on); in qca_debugfs_init()
661 debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off); in qca_debugfs_init()
662 debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on); in qca_debugfs_init()
663 debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off); in qca_debugfs_init()
664 debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms); in qca_debugfs_init()
665 debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms); in qca_debugfs_init()
669 debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans); in qca_debugfs_init()
671 &qca->tx_idle_delay); in qca_debugfs_init()
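The qca_debugfs_init() fragments all follow one pattern: each counter in struct qca_data is exported as a file under an "ibs" debugfs directory, sized to the field type (u8, u32, u64, bool). A minimal sketch of that pattern for a few of the entries; the parent dentry and the 0444 mode are assumptions here, since the fragments only show the per-file calls.

#include <linux/debugfs.h>

/* Export a few of the IBS statistics; every entry in the listing above is
 * created the same way, only the name, helper, and field differ. */
static void qca_debugfs_sketch(struct dentry *parent, struct qca_data *qca)
{
	umode_t mode = 0444;	/* read-only; assumed, not shown above */
	struct dentry *ibs_dir = debugfs_create_dir("ibs", parent);

	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir, &qca->ibs_sent_wakes);
	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
}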
677 struct qca_data *qca = hu->priv; in qca_flush() local
681 skb_queue_purge(&qca->tx_wait_q); in qca_flush()
682 skb_queue_purge(&qca->txq); in qca_flush()
690 struct qca_data *qca = hu->priv; in qca_close() local
696 skb_queue_purge(&qca->tx_wait_q); in qca_close()
697 skb_queue_purge(&qca->txq); in qca_close()
698 skb_queue_purge(&qca->rx_memdump_q); in qca_close()
699 destroy_workqueue(qca->workqueue); in qca_close()
700 del_timer_sync(&qca->tx_idle_timer); in qca_close()
701 del_timer_sync(&qca->wake_retrans_timer); in qca_close()
702 qca->hu = NULL; in qca_close()
704 kfree_skb(qca->rx_skb); in qca_close()
708 kfree(qca); in qca_close()
718 struct qca_data *qca = hu->priv; in device_want_to_wakeup() local
722 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
724 qca->ibs_recv_wakes++; in device_want_to_wakeup()
727 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in device_want_to_wakeup()
728 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
732 switch (qca->rx_ibs_state) { in device_want_to_wakeup()
737 queue_work(qca->workqueue, &qca->ws_awake_rx); in device_want_to_wakeup()
738 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
749 qca->ibs_sent_wacks++; in device_want_to_wakeup()
755 qca->rx_ibs_state); in device_want_to_wakeup()
759 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
770 struct qca_data *qca = hu->priv; in device_want_to_sleep() local
772 BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state); in device_want_to_sleep()
774 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_want_to_sleep()
776 qca->ibs_recv_slps++; in device_want_to_sleep()
778 switch (qca->rx_ibs_state) { in device_want_to_sleep()
781 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; in device_want_to_sleep()
783 queue_work(qca->workqueue, &qca->ws_rx_vote_off); in device_want_to_sleep()
792 qca->rx_ibs_state); in device_want_to_sleep()
796 wake_up_interruptible(&qca->suspend_wait_q); in device_want_to_sleep()
798 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_sleep()
806 struct qca_data *qca = hu->priv; in device_woke_up() local
811 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_woke_up()
813 qca->ibs_recv_wacks++; in device_woke_up()
816 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in device_woke_up()
817 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_woke_up()
821 switch (qca->tx_ibs_state) { in device_woke_up()
825 qca->tx_ibs_state); in device_woke_up()
830 while ((skb = skb_dequeue(&qca->tx_wait_q))) in device_woke_up()
831 skb_queue_tail(&qca->txq, skb); in device_woke_up()
834 del_timer(&qca->wake_retrans_timer); in device_woke_up()
835 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); in device_woke_up()
836 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); in device_woke_up()
837 qca->tx_ibs_state = HCI_IBS_TX_AWAKE; in device_woke_up()
843 qca->tx_ibs_state); in device_woke_up()
847 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_woke_up()
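device_woke_up() handles the controller's wake acknowledgement: packets parked on tx_wait_q while the link was waking are moved to the real txq, the retransmit timer is stopped, the idle timer is (re)armed, and the TX state becomes awake. A condensed sketch of that path, assuming the fields visible above; the suspend-flag check and the call that kicks the transmitter are omitted.

static void woke_up_sketch(struct qca_data *qca)
{
	unsigned long flags, idle_delay;
	struct sk_buff *skb;

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
	qca->ibs_recv_wacks++;

	if (qca->tx_ibs_state == HCI_IBS_TX_WAKING) {
		/* Flush everything that was queued while waking. */
		while ((skb = skb_dequeue(&qca->tx_wait_q)))
			skb_queue_tail(&qca->txq, skb);

		/* Wake acknowledged: stop retransmitting it and start
		 * the idle countdown instead. */
		del_timer(&qca->wake_retrans_timer);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}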
859 struct qca_data *qca = hu->priv; in qca_enqueue() local
862 qca->tx_ibs_state); in qca_enqueue()
864 if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { in qca_enqueue()
874 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_enqueue()
880 if (test_bit(QCA_IBS_DISABLED, &qca->flags) || in qca_enqueue()
881 test_bit(QCA_SUSPENDING, &qca->flags)) { in qca_enqueue()
882 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
883 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_enqueue()
888 switch (qca->tx_ibs_state) { in qca_enqueue()
891 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
892 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); in qca_enqueue()
893 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); in qca_enqueue()
899 skb_queue_tail(&qca->tx_wait_q, skb); in qca_enqueue()
901 qca->tx_ibs_state = HCI_IBS_TX_WAKING; in qca_enqueue()
903 queue_work(qca->workqueue, &qca->ws_awake_device); in qca_enqueue()
909 skb_queue_tail(&qca->tx_wait_q, skb); in qca_enqueue()
914 qca->tx_ibs_state); in qca_enqueue()
919 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_enqueue()
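qca_enqueue() is the TX-side state machine: with IBS disabled or during suspend, packets go straight to txq; when the link is already awake they are queued and the idle timer restarted; when it is asleep they are parked on tx_wait_q, the state moves to waking, and the awake-device work is scheduled; while a wake is already in flight they simply join tx_wait_q. A condensed sketch of that switch, assuming the fields visible above; the SSR/suspend flag handling and the "kick the transmitter" call are left out.

static void enqueue_sketch(struct qca_data *qca, struct sk_buff *skb)
{
	unsigned long flags, idle_delay;

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* Link already awake: queue and restart the idle timer. */
		skb_queue_tail(&qca->txq, skb);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		break;
	case HCI_IBS_TX_ASLEEP:
		/* Park the packet and start waking the controller. */
		skb_queue_tail(&qca->tx_wait_q, skb);
		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
		queue_work(qca->workqueue, &qca->ws_awake_device);
		break;
	case HCI_IBS_TX_WAKING:
		/* A wake is already in flight: just park the packet. */
		skb_queue_tail(&qca->tx_wait_q, skb);
		break;
	default:
		BT_ERR("Illegal tx state %d", qca->tx_ibs_state);
		kfree_skb(skb);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}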
975 struct qca_data *qca = container_of(work, struct qca_data, in qca_controller_memdump() local
977 struct hci_uart *hu = qca->hu; in qca_controller_memdump()
980 struct qca_memdump_data *qca_memdump = qca->qca_memdump; in qca_controller_memdump()
989 while ((skb = skb_dequeue(&qca->rx_memdump_q))) { in qca_controller_memdump()
991 mutex_lock(&qca->hci_memdump_lock); in qca_controller_memdump()
995 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || in qca_controller_memdump()
996 qca->memdump_state == QCA_MEMDUMP_COLLECTED) { in qca_controller_memdump()
997 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1005 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1009 qca->qca_memdump = qca_memdump; in qca_controller_memdump()
1012 qca->memdump_state = QCA_MEMDUMP_COLLECTING; in qca_controller_memdump()
1025 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_controller_memdump()
1026 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1033 qca->qca_memdump = NULL; in qca_controller_memdump()
1034 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1040 queue_delayed_work(qca->workqueue, in qca_controller_memdump()
1041 &qca->ctrl_memdump_timeout, in qca_controller_memdump()
1061 qca->qca_memdump = NULL; in qca_controller_memdump()
1062 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1114 qca->qca_memdump = qca_memdump; in qca_controller_memdump()
1124 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_controller_memdump()
1125 kfree(qca->qca_memdump); in qca_controller_memdump()
1126 qca->qca_memdump = NULL; in qca_controller_memdump()
1127 qca->memdump_state = QCA_MEMDUMP_COLLECTED; in qca_controller_memdump()
1128 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1131 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1140 struct qca_data *qca = hu->priv; in qca_controller_memdump_event() local
1142 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_controller_memdump_event()
1143 skb_queue_tail(&qca->rx_memdump_q, skb); in qca_controller_memdump_event()
1144 queue_work(qca->workqueue, &qca->ctrl_memdump_evt); in qca_controller_memdump_event()
1152 struct qca_data *qca = hu->priv; in qca_recv_event() local
1154 if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) { in qca_recv_event()
1168 complete(&qca->drop_ev_comp); in qca_recv_event()
1218 struct qca_data *qca = hu->priv; in qca_recv() local
1223 qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count, in qca_recv()
1225 if (IS_ERR(qca->rx_skb)) { in qca_recv()
1226 int err = PTR_ERR(qca->rx_skb); in qca_recv()
1228 qca->rx_skb = NULL; in qca_recv()
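qca_recv() delegates frame reassembly to the shared H4 helper: the partially assembled skb is kept in qca->rx_skb across calls, and on error the pointer is reset after logging. A condensed sketch of that error-handling pattern; qca_recv_pkts stands in for the driver's h4_recv_pkt descriptor table, which the listing does not show.

static int recv_sketch(struct hci_uart *hu, const void *data, int count)
{
	struct qca_data *qca = hu->priv;

	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
	if (IS_ERR(qca->rx_skb)) {
		int err = PTR_ERR(qca->rx_skb);

		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
		qca->rx_skb = NULL;	/* drop the broken partial frame */
		return err;
	}

	return count;
}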
1237 struct qca_data *qca = hu->priv; in qca_dequeue() local
1239 return skb_dequeue(&qca->txq); in qca_dequeue()
1281 struct qca_data *qca = hu->priv; in qca_set_baudrate() local
1300 skb_queue_tail(&qca->txq, skb); in qca_set_baudrate()
1305 while (!skb_queue_empty(&qca->txq)) in qca_set_baudrate()
1407 struct qca_data *qca = hu->priv; in qca_set_speed() local
1429 reinit_completion(&qca->drop_ev_comp); in qca_set_speed()
1430 set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); in qca_set_speed()
1450 if (!wait_for_completion_timeout(&qca->drop_ev_comp, in qca_set_speed()
1457 clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); in qca_set_speed()
1466 struct qca_data *qca = hu->priv; in qca_send_crashbuffer() local
1483 skb_queue_tail(&qca->txq, skb); in qca_send_crashbuffer()
1492 struct qca_data *qca = hu->priv; in qca_wait_for_dump_collection() local
1494 wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION, in qca_wait_for_dump_collection()
1497 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_wait_for_dump_collection()
1503 struct qca_data *qca = hu->priv; in qca_hw_error() local
1505 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_hw_error()
1506 set_bit(QCA_HW_ERROR_EVENT, &qca->flags); in qca_hw_error()
1507 bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state); in qca_hw_error()
1509 if (qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_hw_error()
1516 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_hw_error()
1519 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { in qca_hw_error()
1527 mutex_lock(&qca->hci_memdump_lock); in qca_hw_error()
1528 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { in qca_hw_error()
1530 if (qca->qca_memdump) { in qca_hw_error()
1531 vfree(qca->qca_memdump->memdump_buf_head); in qca_hw_error()
1532 kfree(qca->qca_memdump); in qca_hw_error()
1533 qca->qca_memdump = NULL; in qca_hw_error()
1535 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_hw_error()
1536 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_hw_error()
1538 mutex_unlock(&qca->hci_memdump_lock); in qca_hw_error()
1540 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || in qca_hw_error()
1541 qca->memdump_state == QCA_MEMDUMP_COLLECTED) { in qca_hw_error()
1542 cancel_work_sync(&qca->ctrl_memdump_evt); in qca_hw_error()
1543 skb_queue_purge(&qca->rx_memdump_q); in qca_hw_error()
1546 clear_bit(QCA_HW_ERROR_EVENT, &qca->flags); in qca_hw_error()
1552 struct qca_data *qca = hu->priv; in qca_cmd_timeout() local
1554 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_cmd_timeout()
1555 if (qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_cmd_timeout()
1556 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_cmd_timeout()
1559 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { in qca_cmd_timeout()
1567 mutex_lock(&qca->hci_memdump_lock); in qca_cmd_timeout()
1568 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { in qca_cmd_timeout()
1569 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_cmd_timeout()
1570 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { in qca_cmd_timeout()
1577 mutex_unlock(&qca->hci_memdump_lock); in qca_cmd_timeout()
1668 struct qca_data *qca = hu->priv; in qca_power_on() local
1689 clear_bit(QCA_BT_OFF, &qca->flags); in qca_power_on()
1696 struct qca_data *qca = hu->priv; in qca_setup() local
1708 clear_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
1710 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_setup()
1721 qca->memdump_state = QCA_MEMDUMP_IDLE; in qca_setup()
1728 clear_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_setup()
1764 clear_bit(QCA_IBS_DISABLED, &qca->flags); in qca_setup()
1771 set_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
1778 set_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
1881 struct qca_data *qca = hu->priv; in qca_power_shutdown() local
1890 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_power_shutdown()
1891 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_power_shutdown()
1893 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_power_shutdown()
1919 set_bit(QCA_BT_OFF, &qca->flags); in qca_power_shutdown()
1925 struct qca_data *qca = hu->priv; in qca_power_off() local
1931 del_timer_sync(&qca->wake_retrans_timer); in qca_power_off()
1932 del_timer_sync(&qca->tx_idle_timer); in qca_power_off()
1936 && qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_power_off()
1988 static int qca_init_regulators(struct qca_power *qca, in qca_init_regulators() argument
1995 bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL); in qca_init_regulators()
2002 ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk); in qca_init_regulators()
2012 qca->vreg_bulk = bulk; in qca_init_regulators()
2013 qca->num_vregs = num_vregs; in qca_init_regulators()
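qca_init_regulators() uses the managed allocation and regulator APIs: a regulator_bulk_data array sized for num_vregs is devm-allocated, the supplies are acquired in one bulk call, and the result is stored back into struct qca_power. A condensed sketch follows; the loop that copies the supply names from the driver's descriptor table (represented by the vregs parameter here) sits between the two calls and is only hinted at.

#include <linux/regulator/consumer.h>

static int init_regulators_sketch(struct qca_power *qca,
				  const struct qca_vreg *vregs, size_t num_vregs)
{
	struct regulator_bulk_data *bulk;
	int ret;

	/* Managed allocation: freed automatically with the device. */
	bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	/* ... fill in bulk[i].supply from vregs[i] here ... */

	ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
	if (ret < 0)
		return ret;

	qca->vreg_bulk = bulk;
	qca->num_vregs = num_vregs;

	return 0;
}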
2191 struct qca_data *qca = hu->priv; in qca_suspend() local
2198 set_bit(QCA_SUSPENDING, &qca->flags); in qca_suspend()
2203 if (test_bit(QCA_ROM_FW, &qca->flags)) in qca_suspend()
2210 if (test_bit(QCA_BT_OFF, &qca->flags) && in qca_suspend()
2211 !test_bit(QCA_SSR_TRIGGERED, &qca->flags)) in qca_suspend()
2214 if (test_bit(QCA_IBS_DISABLED, &qca->flags) || in qca_suspend()
2215 test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { in qca_suspend()
2216 wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ? in qca_suspend()
2224 wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED, in qca_suspend()
2227 if (test_bit(QCA_IBS_DISABLED, &qca->flags)) { in qca_suspend()
2234 cancel_work_sync(&qca->ws_awake_device); in qca_suspend()
2235 cancel_work_sync(&qca->ws_awake_rx); in qca_suspend()
2237 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in qca_suspend()
2240 switch (qca->tx_ibs_state) { in qca_suspend()
2242 del_timer(&qca->wake_retrans_timer); in qca_suspend()
2245 del_timer(&qca->tx_idle_timer); in qca_suspend()
2256 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in qca_suspend()
2257 qca->ibs_sent_slps++; in qca_suspend()
2265 BT_ERR("Spurious tx state %d", qca->tx_ibs_state); in qca_suspend()
2270 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_suspend()
2284 ret = wait_event_interruptible_timeout(qca->suspend_wait_q, in qca_suspend()
2285 qca->rx_ibs_state == HCI_IBS_RX_ASLEEP, in qca_suspend()
2295 clear_bit(QCA_SUSPENDING, &qca->flags); in qca_suspend()
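The qca_suspend() fragments show the ordering: mark the driver as suspending, bail out early for ROM firmware or when the controller is already off (outside an SSR), wait for any pending IBS-disable/SSR activity, cancel the wake work items, force the TX side asleep while counting a sent sleep indication, and finally wait on suspend_wait_q until the RX side has also gone asleep. A condensed sketch of the TX quiescing part, assuming the fields visible above; the sleep-indication transmission, serial-clock handling, and error paths are not shown in the fragments and are omitted.

static void suspend_tx_sketch(struct qca_data *qca)
{
	unsigned long flags;

	set_bit(QCA_SUSPENDING, &qca->flags);

	/* No new wake attempts may race with the forced sleep below. */
	cancel_work_sync(&qca->ws_awake_device);
	cancel_work_sync(&qca->ws_awake_rx);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock, flags,
				 SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_WAKING:
		del_timer(&qca->wake_retrans_timer);
		fallthrough;
	case HCI_IBS_TX_AWAKE:
		del_timer(&qca->tx_idle_timer);
		/* Force the link asleep before the system suspends. */
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		break;
	case HCI_IBS_TX_ASLEEP:
		break;
	default:
		BT_ERR("Spurious tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}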
2305 struct qca_data *qca = hu->priv; in qca_resume() local
2307 clear_bit(QCA_SUSPENDING, &qca->flags); in qca_resume()