Lines Matching +full:event +full:- +full:deep

1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (c) 2021-2022, Intel Corporation.
23 #include <linux/dma-mapping.h>
84 if (ret == -ETIMEDOUT) in t7xx_wait_pm_config()
85 dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n"); in t7xx_wait_pm_config()
92 struct pci_dev *pdev = t7xx_dev->pdev; in t7xx_pci_pm_init()
94 INIT_LIST_HEAD(&t7xx_dev->md_pm_entities); in t7xx_pci_pm_init()
95 mutex_init(&t7xx_dev->md_pm_entity_mtx); in t7xx_pci_pm_init()
96 spin_lock_init(&t7xx_dev->md_pm_lock); in t7xx_pci_pm_init()
97 init_completion(&t7xx_dev->sleep_lock_acquire); in t7xx_pci_pm_init()
98 init_completion(&t7xx_dev->pm_sr_ack); in t7xx_pci_pm_init()
99 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); in t7xx_pci_pm_init()
101 device_init_wakeup(&pdev->dev, true); in t7xx_pci_pm_init()
102 dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags | in t7xx_pci_pm_init()
106 pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS); in t7xx_pci_pm_init()
107 pm_runtime_use_autosuspend(&pdev->dev); in t7xx_pci_pm_init()
114 /* Enable the PCIe resource lock only after MD deep sleep is done */ in t7xx_pci_pm_init_late()
122 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); in t7xx_pci_pm_init_late()
124 pm_runtime_put_noidle(&t7xx_dev->pdev->dev); in t7xx_pci_pm_init_late()
129 /* The device is kept in FSM re-init flow in t7xx_pci_pm_reinit()
132 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); in t7xx_pci_pm_reinit()
134 pm_runtime_get_noresume(&t7xx_dev->pdev->dev); in t7xx_pci_pm_reinit()
144 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION); in t7xx_pci_pm_exp_detected()
151 mutex_lock(&t7xx_dev->md_pm_entity_mtx); in t7xx_pci_pm_entity_register()
152 list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { in t7xx_pci_pm_entity_register()
153 if (entity->id == pm_entity->id) { in t7xx_pci_pm_entity_register()
154 mutex_unlock(&t7xx_dev->md_pm_entity_mtx); in t7xx_pci_pm_entity_register()
155 return -EEXIST; in t7xx_pci_pm_entity_register()
159 list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities); in t7xx_pci_pm_entity_register()
160 mutex_unlock(&t7xx_dev->md_pm_entity_mtx); in t7xx_pci_pm_entity_register()
168 mutex_lock(&t7xx_dev->md_pm_entity_mtx); in t7xx_pci_pm_entity_unregister()
169 list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) { in t7xx_pci_pm_entity_unregister()
170 if (entity->id == pm_entity->id) { in t7xx_pci_pm_entity_unregister()
171 list_del(&pm_entity->entity); in t7xx_pci_pm_entity_unregister()
172 mutex_unlock(&t7xx_dev->md_pm_entity_mtx); in t7xx_pci_pm_entity_unregister()
177 mutex_unlock(&t7xx_dev->md_pm_entity_mtx); in t7xx_pci_pm_entity_unregister()
179 return -ENXIO; in t7xx_pci_pm_entity_unregister()
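
For illustration, a minimal sketch of how a consumer might pair the register/unregister helpers above. The struct name md_pm_entity, its callback fields, and the PM_ENTITY_ID_CTRL1 value are assumptions made for the sketch, not quoted from the matched lines.

/* Sketch only: field names and the entity ID below are assumed. Registration
 * fails with -EEXIST for a duplicate ID, and unregistration returns -ENXIO if
 * the entity was never registered.
 */
static int my_pm_entity_suspend(struct t7xx_pci_dev *t7xx_dev, void *param)
{
	/* Quiesce this entity's queues before the device suspends. */
	return 0;
}

static int my_pm_entity_resume(struct t7xx_pci_dev *t7xx_dev, void *param)
{
	/* Restart this entity's queues after the device resumes. */
	return 0;
}

static int my_pm_entity_setup(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *entity)
{
	entity->id = PM_ENTITY_ID_CTRL1;	/* assumed ID for the sketch */
	entity->suspend = my_pm_entity_suspend;
	entity->resume = my_pm_entity_resume;
	entity->entity_param = t7xx_dev;

	return t7xx_pci_pm_entity_register(t7xx_dev, entity);
}
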
184 struct device *dev = &t7xx_dev->pdev->dev; in t7xx_pci_sleep_disable_complete()
187 ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire, in t7xx_pci_sleep_disable_complete()
196 * t7xx_pci_disable_sleep() - Disable deep sleep capability.
199  * Lock the deep sleep capability; note that the device can still go into the deep sleep
200  * state while it is in D0 state, from the host's point of view.
202  * If the device is in the deep sleep state, wake it up and disable the deep sleep capability.
208 spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags); in t7xx_pci_disable_sleep()
209 t7xx_dev->sleep_disable_count++; in t7xx_pci_disable_sleep()
210 if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED) in t7xx_pci_disable_sleep()
213 if (t7xx_dev->sleep_disable_count == 1) { in t7xx_pci_disable_sleep()
216 reinit_completion(&t7xx_dev->sleep_lock_acquire); in t7xx_pci_disable_sleep()
225 spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); in t7xx_pci_disable_sleep()
229 spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); in t7xx_pci_disable_sleep()
230 complete_all(&t7xx_dev->sleep_lock_acquire); in t7xx_pci_disable_sleep()
234 * t7xx_pci_enable_sleep() - Enable deep sleep capability.
237  * After enabling deep sleep, the device can enter the deep sleep state.
243 spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags); in t7xx_pci_enable_sleep()
244 t7xx_dev->sleep_disable_count--; in t7xx_pci_enable_sleep()
245 if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED) in t7xx_pci_enable_sleep()
248 if (t7xx_dev->sleep_disable_count == 0) in t7xx_pci_enable_sleep()
252 spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); in t7xx_pci_enable_sleep()
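
The two helpers documented above are meant to bracket hardware accesses that must not race with the device entering deep sleep. A hedged usage sketch follows; it assumes t7xx_pci_sleep_disable_complete() returns nonzero once the sleep lock has been acquired (as the completion wait at the matched lines suggests), and the guarded access itself is a placeholder.

/* Sketch: keep the device out of deep sleep only for the duration of an
 * access that requires it to stay awake.
 */
static int my_guarded_hw_access(struct t7xx_pci_dev *t7xx_dev)
{
	int ret = 0;

	t7xx_pci_disable_sleep(t7xx_dev);

	/* Wait for the sleep_lock_acquire handshake to complete. */
	if (!t7xx_pci_sleep_disable_complete(t7xx_dev)) {
		ret = -ETIMEDOUT;	/* assumed error policy for the sketch */
		goto out;
	}

	/* ... access registers or rings that must not see deep sleep ... */

out:
	t7xx_pci_enable_sleep(t7xx_dev);
	return ret;
}
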
259 reinit_completion(&t7xx_dev->pm_sr_ack); in t7xx_send_pm_request()
261 wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack, in t7xx_send_pm_request()
264 return -ETIMEDOUT; in t7xx_send_pm_request()
277 if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) { in __t7xx_pci_pm_suspend()
278 dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n"); in __t7xx_pci_pm_suspend()
279 return -EFAULT; in __t7xx_pci_pm_suspend()
289 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); in __t7xx_pci_pm_suspend()
291 t7xx_dev->rgu_pci_irq_en = false; in __t7xx_pci_pm_suspend()
293 list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { in __t7xx_pci_pm_suspend()
294 if (!entity->suspend) in __t7xx_pci_pm_suspend()
297 ret = entity->suspend(t7xx_dev, entity->entity_param); in __t7xx_pci_pm_suspend()
299 entity_id = entity->id; in __t7xx_pci_pm_suspend()
300 dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id); in __t7xx_pci_pm_suspend()
307 dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret); in __t7xx_pci_pm_suspend()
314 dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret); in __t7xx_pci_pm_suspend()
318 list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { in __t7xx_pci_pm_suspend()
319 if (entity->suspend_late) in __t7xx_pci_pm_suspend()
320 entity->suspend_late(t7xx_dev, entity->entity_param); in __t7xx_pci_pm_suspend()
327 list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { in __t7xx_pci_pm_suspend()
328 if (entity_id == entity->id) in __t7xx_pci_pm_suspend()
331 if (entity->resume) in __t7xx_pci_pm_suspend()
332 entity->resume(t7xx_dev, entity->entity_param); in __t7xx_pci_pm_suspend()
336 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); in __t7xx_pci_pm_suspend()
349 * following function will re-enable PCIe interrupts. in t7xx_pcie_interrupt_reinit()
359 ret = pcim_enable_device(t7xx_dev->pdev); in t7xx_pcie_reinit()
374 static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event) in t7xx_send_fsm_command() argument
376 struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl; in t7xx_send_fsm_command()
377 struct device *dev = &t7xx_dev->pdev->dev; in t7xx_send_fsm_command()
378 int ret = -EINVAL; in t7xx_send_fsm_command()
380 switch (event) { in t7xx_send_fsm_command()
388 t7xx_dev->rgu_pci_irq_en = true; in t7xx_send_fsm_command()
398 dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret); in t7xx_send_fsm_command()
411 if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) { in __t7xx_pci_pm_resume()
449 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); in __t7xx_pci_pm_resume()
450 t7xx_dev->rgu_pci_irq_en = true; in __t7xx_pci_pm_resume()
475 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); in __t7xx_pci_pm_resume()
483 list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { in __t7xx_pci_pm_resume()
484 if (entity->resume_early) in __t7xx_pci_pm_resume()
485 entity->resume_early(t7xx_dev, entity->entity_param); in __t7xx_pci_pm_resume()
490 dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret); in __t7xx_pci_pm_resume()
494 dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret); in __t7xx_pci_pm_resume()
496 list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { in __t7xx_pci_pm_resume()
497 if (entity->resume) { in __t7xx_pci_pm_resume()
498 ret = entity->resume(t7xx_dev, entity->entity_param); in __t7xx_pci_pm_resume()
500 dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n", in __t7xx_pci_pm_resume()
501 entity->id, ret); in __t7xx_pci_pm_resume()
505 t7xx_dev->rgu_pci_irq_en = true; in __t7xx_pci_pm_resume()
508 pm_runtime_mark_last_busy(&pdev->dev); in __t7xx_pci_pm_resume()
509 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); in __t7xx_pci_pm_resume()
579 if (!t7xx_dev->intr_handler[i]) in t7xx_request_irq()
582 irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d", in t7xx_request_irq()
583 dev_driver_string(&pdev->dev), i); in t7xx_request_irq()
585 ret = -ENOMEM; in t7xx_request_irq()
590 ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i], in t7xx_request_irq()
591 t7xx_dev->intr_thread[i], 0, irq_descr, in t7xx_request_irq()
592 t7xx_dev->callback_param[i]); in t7xx_request_irq()
594 dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret); in t7xx_request_irq()
600 while (i--) { in t7xx_request_irq()
601 if (!t7xx_dev->intr_handler[i]) in t7xx_request_irq()
604 free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]); in t7xx_request_irq()
613 struct pci_dev *pdev = t7xx_dev->pdev; in t7xx_setup_msix()
616  /* Only 6 interrupts are used, but the HW design requires a power-of-2 IRQ allocation */ in t7xx_setup_msix()
619 dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret); in t7xx_setup_msix()
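
To make the power-of-2 constraint noted in the comment above concrete, here is a hedged sketch: with 6 vectors in use, the allocation must be rounded up to 8. The T7XX_IRQ_COUNT macro and the function name are illustrative, not taken from the matched lines.

#include <linux/log2.h>
#include <linux/pci.h>

#define T7XX_IRQ_COUNT	6	/* assumed: number of vectors actually used */

static int my_setup_msix_sketch(struct pci_dev *pdev)
{
	/* roundup_pow_of_two(6) == 8, satisfying the HW allocation requirement. */
	unsigned int nr_vectors = roundup_pow_of_two(T7XX_IRQ_COUNT);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, nr_vectors, nr_vectors, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
		return ret;
	}

	return 0;
}
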
637 if (!t7xx_dev->pdev->msix_cap) in t7xx_interrupt_init()
638 return -EINVAL; in t7xx_interrupt_init()
653 t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base + in t7xx_pci_infracfg_ao_calc()
654 INFRACFG_AO_DEV_CHIP - in t7xx_pci_infracfg_ao_calc()
655 t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; in t7xx_pci_infracfg_ao_calc()
663 t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL); in t7xx_pci_probe()
665 return -ENOMEM; in t7xx_pci_probe()
668 t7xx_dev->pdev = pdev; in t7xx_pci_probe()
679 dev_err(&pdev->dev, "Could not request BARs: %d\n", ret); in t7xx_pci_probe()
680 return -ENOMEM; in t7xx_pci_probe()
683 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); in t7xx_pci_probe()
685 dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret); in t7xx_pci_probe()
689 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); in t7xx_pci_probe()
691 dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret); in t7xx_pci_probe()
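
As a side note, the two mask calls in the probe path above can be collapsed into the standard dma_set_mask_and_coherent() helper; a minimal sketch, assuming the same &pdev->dev device:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch: set the streaming and coherent DMA masks to 64 bits in one call. */
static int my_set_dma_masks(struct pci_dev *pdev)
{
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_err(&pdev->dev, "Could not set 64-bit DMA mask: %d\n", ret);

	return ret;
}
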
696 t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE]; in t7xx_pci_probe()
733 if (!t7xx_dev->intr_handler[i]) in t7xx_pci_remove()
736 free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]); in t7xx_pci_remove()
739 pci_free_irq_vectors(t7xx_dev->pdev); in t7xx_pci_remove()