Lines Matching refs:qca

214 struct qca_data *qca = hu->priv; in serial_clock_vote() local
217 bool old_vote = (qca->tx_vote | qca->rx_vote); in serial_clock_vote()
222 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); in serial_clock_vote()
225 qca->vote_off_ms += diff; in serial_clock_vote()
227 qca->vote_on_ms += diff; in serial_clock_vote()
231 qca->tx_vote = true; in serial_clock_vote()
232 qca->tx_votes_on++; in serial_clock_vote()
237 qca->rx_vote = true; in serial_clock_vote()
238 qca->rx_votes_on++; in serial_clock_vote()
243 qca->tx_vote = false; in serial_clock_vote()
244 qca->tx_votes_off++; in serial_clock_vote()
245 new_vote = qca->rx_vote | qca->tx_vote; in serial_clock_vote()
249 qca->rx_vote = false; in serial_clock_vote()
250 qca->rx_votes_off++; in serial_clock_vote()
251 new_vote = qca->rx_vote | qca->tx_vote; in serial_clock_vote()
268 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); in serial_clock_vote()
271 qca->votes_on++; in serial_clock_vote()
272 qca->vote_off_ms += diff; in serial_clock_vote()
274 qca->votes_off++; in serial_clock_vote()
275 qca->vote_on_ms += diff; in serial_clock_vote()
277 qca->vote_last_jif = jiffies; in serial_clock_vote()
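
The matches above appear to come from serial_clock_vote() in the Linux Bluetooth Qualcomm UART driver (the identifiers are consistent with drivers/bluetooth/hci_qca.c). TX and RX each cast an independent vote for the serial clock; the clock is toggled only when the OR of the two votes changes, and the vote_on_ms/vote_off_ms counters charge elapsed time at each transition or stats update. A minimal sketch reconstructed around the matched lines; the HCI_IBS_* vote constants and the __serial_clock_on()/__serial_clock_off() helpers are assumptions based on the driver's naming conventions, and the branch condition in the stats-update case is not visible in the listing.

    /* Sketch, not verbatim kernel source. */
    static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
    {
        struct qca_data *qca = hu->priv;
        bool old_vote = (qca->tx_vote | qca->rx_vote);
        bool new_vote;
        unsigned int diff;

        switch (vote) {
        case HCI_IBS_VOTE_STATS_UPDATE:
            /* No state change: just account elapsed time */
            diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
            if (old_vote)
                qca->vote_off_ms += diff;
            else
                qca->vote_on_ms += diff;
            return;
        case HCI_IBS_TX_VOTE_CLOCK_ON:
            qca->tx_vote = true;
            qca->tx_votes_on++;
            new_vote = true;
            break;
        case HCI_IBS_RX_VOTE_CLOCK_ON:
            qca->rx_vote = true;
            qca->rx_votes_on++;
            new_vote = true;
            break;
        case HCI_IBS_TX_VOTE_CLOCK_OFF:
            qca->tx_vote = false;
            qca->tx_votes_off++;
            new_vote = qca->rx_vote | qca->tx_vote;
            break;
        case HCI_IBS_RX_VOTE_CLOCK_OFF:
            qca->rx_vote = false;
            qca->rx_votes_off++;
            new_vote = qca->rx_vote | qca->tx_vote;
            break;
        default:
            return;    /* unknown vote */
        }

        if (new_vote == old_vote)
            return;

        /* Toggle the UART clock only on a real 0<->1 transition */
        if (new_vote)
            __serial_clock_on(hu->tty);
        else
            __serial_clock_off(hu->tty);

        /* Charge the elapsed interval to the state being left */
        diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
        if (new_vote) {
            qca->votes_on++;
            qca->vote_off_ms += diff;
        } else {
            qca->votes_off++;
            qca->vote_on_ms += diff;
        }
        qca->vote_last_jif = jiffies;
    }
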
288 struct qca_data *qca = hu->priv; in send_hci_ibs_cmd() local
301 skb_queue_tail(&qca->txq, skb); in send_hci_ibs_cmd()
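
send_hci_ibs_cmd() queues a single in-band-sleep (IBS) control byte on the normal transmit queue, so sleep/wake signaling is serialized with regular HCI traffic. A plausible reconstruction; the allocation size and the GFP_ATOMIC flag are assumptions:

    static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
    {
        struct qca_data *qca = hu->priv;
        struct sk_buff *skb;

        skb = bt_skb_alloc(1, GFP_ATOMIC);    /* one IBS control byte */
        if (!skb)
            return -ENOMEM;

        skb_put_u8(skb, cmd);    /* HCI_IBS_SLEEP_IND / WAKE_IND / WAKE_ACK */

        skb_queue_tail(&qca->txq, skb);       /* matched line 301 */
        return 0;
    }
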
308 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_awake_device() local
310 struct hci_uart *hu = qca->hu; in qca_wq_awake_device()
319 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_wq_awake_device()
325 qca->ibs_sent_wakes++; in qca_wq_awake_device()
328 retrans_delay = msecs_to_jiffies(qca->wake_retrans); in qca_wq_awake_device()
329 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); in qca_wq_awake_device()
331 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_wq_awake_device()
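
qca_wq_awake_device() is the workqueue half of the TX wake-up handshake: it sends HCI_IBS_WAKE_IND to the controller and arms the retransmit timer so the indication is re-sent if no ack arrives within wake_retrans ms. A sketch consistent with the matched lines; the clock-vote call before the lock and the final hci_uart_tx_wakeup() are assumptions based on the driver's sibling functions:

    static void qca_wq_awake_device(struct work_struct *work)
    {
        struct qca_data *qca = container_of(work, struct qca_data,
                                            ws_awake_device);
        struct hci_uart *hu = qca->hu;
        unsigned long retrans_delay;
        unsigned long flags;

        /* Assumed: vote the serial clock on before signaling the device */
        serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

        spin_lock_irqsave(&qca->hci_ibs_lock, flags);

        if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
            BT_ERR("Failed to send WAKE to device");

        qca->ibs_sent_wakes++;

        /* Re-send the WAKE_IND if the ack does not arrive in time */
        retrans_delay = msecs_to_jiffies(qca->wake_retrans);
        mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

        spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

        hci_uart_tx_wakeup(hu);    /* assumed: kick the UART to transmit */
    }
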
339 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_awake_rx() local
341 struct hci_uart *hu = qca->hu; in qca_wq_awake_rx()
348 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_wq_awake_rx()
349 qca->rx_ibs_state = HCI_IBS_RX_AWAKE; in qca_wq_awake_rx()
357 qca->ibs_sent_wacks++; in qca_wq_awake_rx()
359 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_wq_awake_rx()
367 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_serial_rx_clock_vote_off() local
369 struct hci_uart *hu = qca->hu; in qca_wq_serial_rx_clock_vote_off()
378 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_serial_tx_clock_vote_off() local
380 struct hci_uart *hu = qca->hu; in qca_wq_serial_tx_clock_vote_off()
395 struct qca_data *qca = from_timer(qca, t, tx_idle_timer); in hci_ibs_tx_idle_timeout() local
396 struct hci_uart *hu = qca->hu; in hci_ibs_tx_idle_timeout()
399 BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state); in hci_ibs_tx_idle_timeout()
401 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in hci_ibs_tx_idle_timeout()
404 switch (qca->tx_ibs_state) { in hci_ibs_tx_idle_timeout()
411 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in hci_ibs_tx_idle_timeout()
412 qca->ibs_sent_slps++; in hci_ibs_tx_idle_timeout()
413 queue_work(qca->workqueue, &qca->ws_tx_vote_off); in hci_ibs_tx_idle_timeout()
421 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); in hci_ibs_tx_idle_timeout()
425 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_tx_idle_timeout()
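
hci_ibs_tx_idle_timeout() fires after tx_idle_delay ms without TX activity. Only the AWAKE state is a legal source: the driver sends HCI_IBS_SLEEP_IND, moves TX to ASLEEP, and defers the clock-vote drop to a work item; any other state logs a spurious timeout. Sketch around the matched lines (the SINGLE_DEPTH_NESTING argument completing the split spin_lock_irqsave_nested() call is an assumption):

    static void hci_ibs_tx_idle_timeout(struct timer_list *t)
    {
        struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
        struct hci_uart *hu = qca->hu;
        unsigned long flags;

        BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);

        spin_lock_irqsave_nested(&qca->hci_ibs_lock,
                                 flags, SINGLE_DEPTH_NESTING);

        switch (qca->tx_ibs_state) {
        case HCI_IBS_TX_AWAKE:
            /* TX went idle: tell the controller we are going to sleep */
            if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
                BT_ERR("Failed to send SLEEP to device");
                break;
            }
            qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
            qca->ibs_sent_slps++;
            queue_work(qca->workqueue, &qca->ws_tx_vote_off);
            break;
        default:
            BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
            break;
        }

        spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
    }
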
430 struct qca_data *qca = from_timer(qca, t, wake_retrans_timer); in hci_ibs_wake_retrans_timeout() local
431 struct hci_uart *hu = qca->hu; in hci_ibs_wake_retrans_timeout()
436 hu, qca->tx_ibs_state); in hci_ibs_wake_retrans_timeout()
438 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in hci_ibs_wake_retrans_timeout()
441 switch (qca->tx_ibs_state) { in hci_ibs_wake_retrans_timeout()
449 qca->ibs_sent_wakes++; in hci_ibs_wake_retrans_timeout()
450 retrans_delay = msecs_to_jiffies(qca->wake_retrans); in hci_ibs_wake_retrans_timeout()
451 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); in hci_ibs_wake_retrans_timeout()
459 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); in hci_ibs_wake_retrans_timeout()
463 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_wake_retrans_timeout()
473 struct qca_data *qca; in qca_open() local
481 qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); in qca_open()
482 if (!qca) in qca_open()
485 skb_queue_head_init(&qca->txq); in qca_open()
486 skb_queue_head_init(&qca->tx_wait_q); in qca_open()
487 spin_lock_init(&qca->hci_ibs_lock); in qca_open()
488 qca->workqueue = alloc_ordered_workqueue("qca_wq", 0); in qca_open()
489 if (!qca->workqueue) { in qca_open()
491 kfree(qca); in qca_open()
495 INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx); in qca_open()
496 INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device); in qca_open()
497 INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off); in qca_open()
498 INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off); in qca_open()
500 qca->hu = hu; in qca_open()
501 init_completion(&qca->drop_ev_comp); in qca_open()
504 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in qca_open()
505 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; in qca_open()
507 qca->vote_last_jif = jiffies; in qca_open()
509 hu->priv = qca; in qca_open()
523 destroy_workqueue(qca->workqueue); in qca_open()
524 kfree_skb(qca->rx_skb); in qca_open()
526 kfree(qca); in qca_open()
532 timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0); in qca_open()
533 qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS; in qca_open()
535 timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0); in qca_open()
536 qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS; in qca_open()
539 qca->tx_idle_delay, qca->wake_retrans); in qca_open()
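
Taken together, the qca_open() matches show the initialization order: allocate qca_data, set up the queues, lock, and ordered workqueue, register the four work items, start both IBS directions asleep, and only then install the two timers with their default timeouts; the destroy_workqueue()/kfree_skb()/kfree() matches are an error-unwind path for setup steps that can fail in between. A condensed sketch with that intermediate setup elided and marked:

    static int qca_open(struct hci_uart *hu)
    {
        struct qca_data *qca;

        qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
        if (!qca)
            return -ENOMEM;

        skb_queue_head_init(&qca->txq);
        skb_queue_head_init(&qca->tx_wait_q);
        spin_lock_init(&qca->hci_ibs_lock);

        qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
        if (!qca->workqueue) {
            kfree(qca);
            return -ENOMEM;
        }

        INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
        INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
        INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
        INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);

        qca->hu = hu;
        init_completion(&qca->drop_ev_comp);

        /* Both directions start asleep; traffic or WAKE_IND wakes them */
        qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
        qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
        qca->vote_last_jif = jiffies;

        hu->priv = qca;

        /* ... device/power setup elided; on failure it unwinds via
         * destroy_workqueue(qca->workqueue), kfree_skb(qca->rx_skb),
         * kfree(qca), matching the error-path lines above ... */

        timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
        qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;

        timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
        qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;

        return 0;
    }
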
547 struct qca_data *qca = hu->priv; in qca_debugfs_init() local
558 debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state); in qca_debugfs_init()
559 debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state); in qca_debugfs_init()
561 &qca->ibs_sent_slps); in qca_debugfs_init()
563 &qca->ibs_sent_wakes); in qca_debugfs_init()
565 &qca->ibs_sent_wacks); in qca_debugfs_init()
567 &qca->ibs_recv_slps); in qca_debugfs_init()
569 &qca->ibs_recv_wakes); in qca_debugfs_init()
571 &qca->ibs_recv_wacks); in qca_debugfs_init()
572 debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote); in qca_debugfs_init()
573 debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on); in qca_debugfs_init()
574 debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off); in qca_debugfs_init()
575 debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote); in qca_debugfs_init()
576 debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on); in qca_debugfs_init()
577 debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off); in qca_debugfs_init()
578 debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on); in qca_debugfs_init()
579 debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off); in qca_debugfs_init()
580 debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms); in qca_debugfs_init()
581 debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms); in qca_debugfs_init()
585 debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans); in qca_debugfs_init()
587 &qca->tx_idle_delay); in qca_debugfs_init()
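
The qca_debugfs_init() matches suggest a single debugfs directory with a mode variable that flips from read-only (for states and counters) to read/write (for the two tunable timeouts). A sketch of the pattern; the directory name, the 0444/0644 modes, and the counter entry names are assumptions:

    ibs_dir = debugfs_create_dir("ibs", hu->hdev->debugfs);

    /* Read-only: current state and statistics */
    mode = 0444;
    debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
    debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
    debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir, &qca->ibs_sent_slps);
    /* ... the remaining counters follow the same pattern ... */

    /* Read/write: tunable timeouts (milliseconds) */
    mode = 0644;
    debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
    debugfs_create_u32("tx_idle_delay", mode, ibs_dir, &qca->tx_idle_delay);
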
593 struct qca_data *qca = hu->priv; in qca_flush() local
597 skb_queue_purge(&qca->tx_wait_q); in qca_flush()
598 skb_queue_purge(&qca->txq); in qca_flush()
607 struct qca_data *qca = hu->priv; in qca_close() local
613 skb_queue_purge(&qca->tx_wait_q); in qca_close()
614 skb_queue_purge(&qca->txq); in qca_close()
615 del_timer(&qca->tx_idle_timer); in qca_close()
616 del_timer(&qca->wake_retrans_timer); in qca_close()
617 destroy_workqueue(qca->workqueue); in qca_close()
618 qca->hu = NULL; in qca_close()
629 kfree_skb(qca->rx_skb); in qca_close()
633 kfree(qca); in qca_close()
643 struct qca_data *qca = hu->priv; in device_want_to_wakeup() local
647 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
649 qca->ibs_recv_wakes++; in device_want_to_wakeup()
651 switch (qca->rx_ibs_state) { in device_want_to_wakeup()
656 queue_work(qca->workqueue, &qca->ws_awake_rx); in device_want_to_wakeup()
657 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
668 qca->ibs_sent_wacks++; in device_want_to_wakeup()
674 qca->rx_ibs_state); in device_want_to_wakeup()
678 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
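
device_want_to_wakeup() handles HCI_IBS_WAKE_IND from the controller. When RX is asleep, the serial clock may be off, so the clock vote and the WAKE_ACK are deferred to the ws_awake_rx work item; when RX is already awake, the ack is sent inline. Sketch reconstructed from the matched lines (the error strings are assumptions):

    static void device_want_to_wakeup(struct hci_uart *hu)
    {
        struct qca_data *qca = hu->priv;
        unsigned long flags;

        spin_lock_irqsave(&qca->hci_ibs_lock, flags);

        qca->ibs_recv_wakes++;

        switch (qca->rx_ibs_state) {
        case HCI_IBS_RX_ASLEEP:
            /* Clock may be off: defer vote + ack to the workqueue */
            queue_work(qca->workqueue, &qca->ws_awake_rx);
            spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
            return;
        case HCI_IBS_RX_AWAKE:
            /* Already awake: acknowledge immediately */
            if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
                BT_ERR("Failed to acknowledge device wake up");
                break;
            }
            qca->ibs_sent_wacks++;
            break;
        default:
            BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
                   qca->rx_ibs_state);
            break;
        }

        spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

        hci_uart_tx_wakeup(hu);    /* assumed: flush the queued ack */
    }
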
689 struct qca_data *qca = hu->priv; in device_want_to_sleep() local
691 BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state); in device_want_to_sleep()
693 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_want_to_sleep()
695 qca->ibs_recv_slps++; in device_want_to_sleep()
697 switch (qca->rx_ibs_state) { in device_want_to_sleep()
700 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; in device_want_to_sleep()
702 queue_work(qca->workqueue, &qca->ws_rx_vote_off); in device_want_to_sleep()
711 qca->rx_ibs_state); in device_want_to_sleep()
715 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_sleep()
723 struct qca_data *qca = hu->priv; in device_woke_up() local
728 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_woke_up()
730 qca->ibs_recv_wacks++; in device_woke_up()
732 switch (qca->tx_ibs_state) { in device_woke_up()
736 qca->tx_ibs_state); in device_woke_up()
741 while ((skb = skb_dequeue(&qca->tx_wait_q))) in device_woke_up()
742 skb_queue_tail(&qca->txq, skb); in device_woke_up()
745 del_timer(&qca->wake_retrans_timer); in device_woke_up()
746 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); in device_woke_up()
747 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); in device_woke_up()
748 qca->tx_ibs_state = HCI_IBS_TX_AWAKE; in device_woke_up()
756 qca->tx_ibs_state); in device_woke_up()
760 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_woke_up()
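
device_woke_up() completes the TX handshake when the controller's HCI_IBS_WAKE_ACK arrives: packets parked on tx_wait_q while the device was waking move to the real txq, the retransmit timer is swapped for the idle timer, and TX enters AWAKE. Sketch around the matched lines:

    static void device_woke_up(struct hci_uart *hu)
    {
        struct qca_data *qca = hu->priv;
        unsigned long flags, idle_delay;
        struct sk_buff *skb;

        spin_lock_irqsave(&qca->hci_ibs_lock, flags);

        qca->ibs_recv_wacks++;

        switch (qca->tx_ibs_state) {
        case HCI_IBS_TX_AWAKE:
            /* Duplicate ack (e.g. after a retransmitted WAKE_IND) */
            BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
                   qca->tx_ibs_state);
            break;
        case HCI_IBS_TX_WAKING:
            /* Release the packets parked while the device was waking */
            while ((skb = skb_dequeue(&qca->tx_wait_q)))
                skb_queue_tail(&qca->txq, skb);

            /* Swap the retransmit timer for the idle timer */
            del_timer(&qca->wake_retrans_timer);
            idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
            mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
            qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
            break;
        default:
            BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
                   qca->tx_ibs_state);
            break;
        }

        spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

        hci_uart_tx_wakeup(hu);    /* assumed: transmit the released packets */
    }
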
772 struct qca_data *qca = hu->priv; in qca_enqueue() local
775 qca->tx_ibs_state); in qca_enqueue()
780 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_enqueue()
785 if (!test_bit(QCA_IBS_ENABLED, &qca->flags)) { in qca_enqueue()
786 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
787 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_enqueue()
792 switch (qca->tx_ibs_state) { in qca_enqueue()
795 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
796 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); in qca_enqueue()
797 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); in qca_enqueue()
803 skb_queue_tail(&qca->tx_wait_q, skb); in qca_enqueue()
805 qca->tx_ibs_state = HCI_IBS_TX_WAKING; in qca_enqueue()
807 queue_work(qca->workqueue, &qca->ws_awake_device); in qca_enqueue()
813 skb_queue_tail(&qca->tx_wait_q, skb); in qca_enqueue()
818 qca->tx_ibs_state); in qca_enqueue()
823 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_enqueue()
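
qca_enqueue() is the TX-side state machine. With IBS disabled the packet goes straight to txq; otherwise AWAKE queues it and re-arms the idle timer, ASLEEP parks it, flips to WAKING, and schedules ws_awake_device, and WAKING just parks it until the ack arrives. Sketch reconstructed from the matched lines (the frame-type prepend is an assumption based on the H4-style framing):

    static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
    {
        struct qca_data *qca = hu->priv;
        unsigned long flags, idle_delay;

        /* Assumed: prepend the H4 packet-type byte */
        memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

        spin_lock_irqsave(&qca->hci_ibs_lock, flags);

        /* With IBS off, bypass the sleep state machine entirely */
        if (!test_bit(QCA_IBS_ENABLED, &qca->flags)) {
            skb_queue_tail(&qca->txq, skb);
            spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
            return 0;
        }

        switch (qca->tx_ibs_state) {
        case HCI_IBS_TX_AWAKE:
            skb_queue_tail(&qca->txq, skb);
            idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
            mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
            break;
        case HCI_IBS_TX_ASLEEP:
            /* Park the packet and start the wake-up handshake */
            skb_queue_tail(&qca->tx_wait_q, skb);
            qca->tx_ibs_state = HCI_IBS_TX_WAKING;
            queue_work(qca->workqueue, &qca->ws_awake_device);
            break;
        case HCI_IBS_TX_WAKING:
            /* Handshake already in flight: just park the packet */
            skb_queue_tail(&qca->tx_wait_q, skb);
            break;
        default:
            BT_ERR("Illegal tx state: %d (losing packet)",
                   qca->tx_ibs_state);
            kfree_skb(skb);
            break;
        }

        spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
        return 0;
    }

The actual UART transmit is triggered afterwards by the hci_uart core, which pulls from txq via qca_dequeue() (matched further below).
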
880 struct qca_data *qca = hu->priv; in qca_recv_event() local
882 if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) { in qca_recv_event()
896 complete(&qca->drop_ev_comp); in qca_recv_event()
938 struct qca_data *qca = hu->priv; in qca_recv() local
943 qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count, in qca_recv()
945 if (IS_ERR(qca->rx_skb)) { in qca_recv()
946 int err = PTR_ERR(qca->rx_skb); in qca_recv()
948 qca->rx_skb = NULL; in qca_recv()
957 struct qca_data *qca = hu->priv; in qca_dequeue() local
959 return skb_dequeue(&qca->txq); in qca_dequeue()
1001 struct qca_data *qca = hu->priv; in qca_set_baudrate() local
1020 skb_queue_tail(&qca->txq, skb); in qca_set_baudrate()
1025 while (!skb_queue_empty(&qca->txq)) in qca_set_baudrate()
1125 struct qca_data *qca = hu->priv; in qca_set_speed() local
1146 reinit_completion(&qca->drop_ev_comp); in qca_set_speed()
1147 set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); in qca_set_speed()
1166 if (!wait_for_completion_timeout(&qca->drop_ev_comp, in qca_set_speed()
1173 clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); in qca_set_speed()
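
The drop_ev_comp matches tie qca_set_speed() to qca_recv_event(): when the controller's baudrate is changed, the confirmation vendor event can arrive at the old rate and must be discarded, so qca_set_speed() arms a flag plus completion, and qca_recv_event() drops the event and signals the waiter. A sketch of both sides; the 100 ms timeout and the error handling are assumptions:

    /* In qca_set_speed(): arm the drop logic before switching rates */
    reinit_completion(&qca->drop_ev_comp);
    set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);

    /* ... send the vendor baudrate command, retune the host UART ... */

    if (!wait_for_completion_timeout(&qca->drop_ev_comp,
                                     msecs_to_jiffies(100))) {
        bt_dev_err(hu->hdev, "Failed to change controller baudrate");
        ret = -ETIMEDOUT;
    }
    clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);

    /* In qca_recv_event(): swallow the stale event and wake the waiter */
    if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) {
        /* ... verify this is the expected vendor event ... */
        kfree_skb(skb);
        complete(&qca->drop_ev_comp);
        return 0;
    }
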
1233 struct qca_data *qca = hu->priv; in qca_setup() local
1245 clear_bit(QCA_IBS_ENABLED, &qca->flags); in qca_setup()
1295 set_bit(QCA_IBS_ENABLED, &qca->flags); in qca_setup()
1356 struct qca_data *qca = hu->priv; in qca_power_shutdown() local
1363 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_power_shutdown()
1364 clear_bit(QCA_IBS_ENABLED, &qca->flags); in qca_power_shutdown()
1366 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_power_shutdown()
1462 static int qca_init_regulators(struct qca_power *qca, in qca_init_regulators() argument
1467 qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs, in qca_init_regulators()
1470 if (!qca->vreg_bulk) in qca_init_regulators()
1474 qca->vreg_bulk[i].supply = vregs[i].name; in qca_init_regulators()
1476 return devm_regulator_bulk_get(qca->dev, num_vregs, qca->vreg_bulk); in qca_init_regulators()
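
qca_init_regulators() is almost fully visible in the matches: allocate a regulator_bulk_data array with devm_kcalloc(), copy in the supply names, and hand the whole set to devm_regulator_bulk_get() so the regulators are released automatically with the device. A sketch; the element type in the sizeof is an assumption:

    static int qca_init_regulators(struct qca_power *qca,
                                   const struct qca_vreg *vregs,
                                   size_t num_vregs)
    {
        size_t i;

        qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
                                      sizeof(struct regulator_bulk_data),
                                      GFP_KERNEL);
        if (!qca->vreg_bulk)
            return -ENOMEM;

        for (i = 0; i < num_vregs; i++)
            qca->vreg_bulk[i].supply = vregs[i].name;

        /* devm_*: released automatically when the driver detaches */
        return devm_regulator_bulk_get(qca->dev, num_vregs, qca->vreg_bulk);
    }
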