Lines matching refs: genpd (drivers/base/power/domain.c)
29 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ argument
34 __routine = genpd->dev_ops.callback; \
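The two lines above are fragments of the GENPD_DEV_CALLBACK() helper. Reconstructed roughly from the kernel source, it is a statement expression that looks up the requested callback in genpd->dev_ops and only invokes it if the provider installed one:

	#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
	({								\
		type (*__routine)(struct device *__d);			\
		type __ret = (type)0;					\
									\
		__routine = genpd->dev_ops.callback;			\
		if (__routine) {					\
			__ret = __routine(dev);				\
		}							\
		__ret;							\
	})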
45 void (*lock)(struct generic_pm_domain *genpd);
46 void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
47 int (*lock_interruptible)(struct generic_pm_domain *genpd);
48 void (*unlock)(struct generic_pm_domain *genpd);
51 static void genpd_lock_mtx(struct generic_pm_domain *genpd) in genpd_lock_mtx() argument
53 mutex_lock(&genpd->mlock); in genpd_lock_mtx()
56 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd, in genpd_lock_nested_mtx() argument
59 mutex_lock_nested(&genpd->mlock, depth); in genpd_lock_nested_mtx()
62 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd) in genpd_lock_interruptible_mtx() argument
64 return mutex_lock_interruptible(&genpd->mlock); in genpd_lock_interruptible_mtx()
67 static void genpd_unlock_mtx(struct generic_pm_domain *genpd) in genpd_unlock_mtx() argument
69 return mutex_unlock(&genpd->mlock); in genpd_unlock_mtx()
79 static void genpd_lock_spin(struct generic_pm_domain *genpd) in genpd_lock_spin() argument
80 __acquires(&genpd->slock) in genpd_lock_spin()
84 spin_lock_irqsave(&genpd->slock, flags); in genpd_lock_spin()
85 genpd->lock_flags = flags; in genpd_lock_spin()
88 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd, in genpd_lock_nested_spin() argument
90 __acquires(&genpd->slock) in genpd_lock_nested_spin()
94 spin_lock_irqsave_nested(&genpd->slock, flags, depth); in genpd_lock_nested_spin()
95 genpd->lock_flags = flags; in genpd_lock_nested_spin()
98 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd) in genpd_lock_interruptible_spin() argument
99 __acquires(&genpd->slock) in genpd_lock_interruptible_spin()
103 spin_lock_irqsave(&genpd->slock, flags); in genpd_lock_interruptible_spin()
104 genpd->lock_flags = flags; in genpd_lock_interruptible_spin()
108 static void genpd_unlock_spin(struct generic_pm_domain *genpd) in genpd_unlock_spin() argument
109 __releases(&genpd->slock) in genpd_unlock_spin()
111 spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags); in genpd_unlock_spin()
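The mutex and spinlock variants above are selected per domain through genpd->lock_ops (assigned in genpd_lock_init(), further down in this listing). In the kernel source the ops tables and the dispatch macros look roughly like this:

	static const struct genpd_lock_ops genpd_mtx_ops = {
		.lock = genpd_lock_mtx,
		.lock_nested = genpd_lock_nested_mtx,
		.lock_interruptible = genpd_lock_interruptible_mtx,
		.unlock = genpd_unlock_mtx,
	};

	static const struct genpd_lock_ops genpd_spin_ops = {
		.lock = genpd_lock_spin,
		.lock_nested = genpd_lock_nested_spin,
		.lock_interruptible = genpd_lock_interruptible_spin,
		.unlock = genpd_unlock_spin,
	};

	#define genpd_lock(p)			p->lock_ops->lock(p)
	#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
	#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
	#define genpd_unlock(p)			p->lock_ops->unlock(p)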
126 #define genpd_status_on(genpd) (genpd->status == GPD_STATE_ACTIVE) argument
127 #define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE) argument
128 #define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON) argument
129 #define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP) argument
130 #define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN) argument
131 #define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON) argument
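These macros only test bits in genpd->flags, so a provider has to populate the flags before calling pm_genpd_init() (the IRQ_SAFE bit, for instance, decides between the mutex and spinlock lock_ops above). A minimal, hypothetical domain declaration (my_pd is not from the listing):

	static struct generic_pm_domain my_pd = {
		.name  = "my_pd",
		/* spinlock locking; keep the domain on when it is a wakeup path */
		.flags = GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_ACTIVE_WAKEUP,
	};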
134 const struct generic_pm_domain *genpd) in irq_safe_dev_in_no_sleep_domain() argument
138 ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd); in irq_safe_dev_in_no_sleep_domain()
145 if (ret && !genpd_is_always_on(genpd)) in irq_safe_dev_in_no_sleep_domain()
147 genpd->name); in irq_safe_dev_in_no_sleep_domain()
184 static int genpd_stop_dev(const struct generic_pm_domain *genpd, in genpd_stop_dev() argument
187 return GENPD_DEV_CALLBACK(genpd, int, stop, dev); in genpd_stop_dev()
190 static int genpd_start_dev(const struct generic_pm_domain *genpd, in genpd_start_dev() argument
193 return GENPD_DEV_CALLBACK(genpd, int, start, dev); in genpd_start_dev()
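genpd_stop_dev()/genpd_start_dev() dispatch through GENPD_DEV_CALLBACK() to genpd->dev_ops.stop/.start, which genpd invokes around runtime suspend/resume of each attached device. A hedged sketch of provider-supplied hooks (my_pd_stop/my_pd_start are hypothetical; setting GENPD_FLAG_PM_CLK instead installs pm_clk_suspend/pm_clk_resume, as pm_genpd_init() shows further down):

	static int my_pd_stop(struct device *dev)
	{
		/* e.g. gate the device's interface clock */
		return 0;
	}

	static int my_pd_start(struct device *dev)
	{
		/* e.g. ungate the device's interface clock */
		return 0;
	}

	static void my_pd_setup_dev_ops(void)
	{
		/* must happen before devices are added to the domain */
		my_pd.dev_ops.stop = my_pd_stop;
		my_pd.dev_ops.start = my_pd_start;
	}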
196 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) in genpd_sd_counter_dec() argument
200 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0)) in genpd_sd_counter_dec()
201 ret = !!atomic_dec_and_test(&genpd->sd_count); in genpd_sd_counter_dec()
206 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) in genpd_sd_counter_inc() argument
208 atomic_inc(&genpd->sd_count); in genpd_sd_counter_inc()
213 static void genpd_update_accounting(struct generic_pm_domain *genpd) in genpd_update_accounting() argument
218 delta = ktime_sub(now, genpd->accounting_time); in genpd_update_accounting()
225 if (genpd->status == GPD_STATE_ACTIVE) { in genpd_update_accounting()
226 int state_idx = genpd->state_idx; in genpd_update_accounting()
228 genpd->states[state_idx].idle_time = in genpd_update_accounting()
229 ktime_add(genpd->states[state_idx].idle_time, delta); in genpd_update_accounting()
231 genpd->on_time = ktime_add(genpd->on_time, delta); in genpd_update_accounting()
234 genpd->accounting_time = now; in genpd_update_accounting()
237 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} in genpd_update_accounting() argument
240 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd, in _genpd_reeval_performance_state() argument
248 if (state == genpd->performance_state) in _genpd_reeval_performance_state()
252 if (state > genpd->performance_state) in _genpd_reeval_performance_state()
256 list_for_each_entry(pdd, &genpd->dev_list, list_node) { in _genpd_reeval_performance_state()
277 list_for_each_entry(link, &genpd->master_links, master_node) { in _genpd_reeval_performance_state()
285 static int _genpd_set_performance_state(struct generic_pm_domain *genpd, in _genpd_set_performance_state() argument
292 if (state == genpd->performance_state) in _genpd_set_performance_state()
296 list_for_each_entry(link, &genpd->slave_links, slave_node) { in _genpd_set_performance_state()
303 ret = dev_pm_opp_xlate_performance_state(genpd->opp_table, in _genpd_set_performance_state()
327 ret = genpd->set_performance_state(genpd, state); in _genpd_set_performance_state()
331 genpd->performance_state = state; in _genpd_set_performance_state()
336 list_for_each_entry_continue_reverse(link, &genpd->slave_links, in _genpd_set_performance_state()
378 struct generic_pm_domain *genpd; in dev_pm_genpd_set_performance_state() local
383 genpd = dev_to_genpd_safe(dev); in dev_pm_genpd_set_performance_state()
384 if (!genpd) in dev_pm_genpd_set_performance_state()
387 if (unlikely(!genpd->set_performance_state)) in dev_pm_genpd_set_performance_state()
394 genpd_lock(genpd); in dev_pm_genpd_set_performance_state()
400 state = _genpd_reeval_performance_state(genpd, state); in dev_pm_genpd_set_performance_state()
401 ret = _genpd_set_performance_state(genpd, state, 0); in dev_pm_genpd_set_performance_state()
405 genpd_unlock(genpd); in dev_pm_genpd_set_performance_state()
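dev_pm_genpd_set_performance_state() is the consumer-side entry point: a driver whose device is attached to a genpd implementing ->set_performance_state can request a performance level directly (drivers more commonly reach it indirectly through the OPP framework and required-opps). A hedged usage sketch; the value 3 is arbitrary:

	#include <linux/pm_domain.h>

	static int my_driver_boost(struct device *dev)
	{
		int ret;

		/* request a higher domain performance state; 0 drops the request */
		ret = dev_pm_genpd_set_performance_state(dev, 3);
		if (ret)
			dev_err(dev, "failed to set performance state: %d\n", ret);

		return ret;
	}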
411 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) in _genpd_power_on() argument
413 unsigned int state_idx = genpd->state_idx; in _genpd_power_on()
418 if (!genpd->power_on) in _genpd_power_on()
422 return genpd->power_on(genpd); in _genpd_power_on()
425 ret = genpd->power_on(genpd); in _genpd_power_on()
430 if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns) in _genpd_power_on()
433 genpd->states[state_idx].power_on_latency_ns = elapsed_ns; in _genpd_power_on()
434 genpd->max_off_time_changed = true; in _genpd_power_on()
436 genpd->name, "on", elapsed_ns); in _genpd_power_on()
441 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed) in _genpd_power_off() argument
443 unsigned int state_idx = genpd->state_idx; in _genpd_power_off()
448 if (!genpd->power_off) in _genpd_power_off()
452 return genpd->power_off(genpd); in _genpd_power_off()
455 ret = genpd->power_off(genpd); in _genpd_power_off()
460 if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns) in _genpd_power_off()
463 genpd->states[state_idx].power_off_latency_ns = elapsed_ns; in _genpd_power_off()
464 genpd->max_off_time_changed = true; in _genpd_power_off()
466 genpd->name, "off", elapsed_ns); in _genpd_power_off()
478 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) in genpd_queue_power_off_work() argument
480 queue_work(pm_wq, &genpd->power_off_work); in genpd_queue_power_off_work()
494 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, in genpd_power_off() argument
506 if (!genpd_status_on(genpd) || genpd->prepared_count > 0) in genpd_power_off()
514 if (genpd_is_always_on(genpd) || in genpd_power_off()
515 genpd_is_rpm_always_on(genpd) || in genpd_power_off()
516 atomic_read(&genpd->sd_count) > 0) in genpd_power_off()
519 list_for_each_entry(pdd, &genpd->dev_list, list_node) { in genpd_power_off()
531 irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd)) in genpd_power_off()
538 if (genpd->gov && genpd->gov->power_down_ok) { in genpd_power_off()
539 if (!genpd->gov->power_down_ok(&genpd->domain)) in genpd_power_off()
544 if (!genpd->gov) in genpd_power_off()
545 genpd->state_idx = 0; in genpd_power_off()
547 if (genpd->power_off) { in genpd_power_off()
550 if (atomic_read(&genpd->sd_count) > 0) in genpd_power_off()
561 ret = _genpd_power_off(genpd, true); in genpd_power_off()
566 genpd->status = GPD_STATE_POWER_OFF; in genpd_power_off()
567 genpd_update_accounting(genpd); in genpd_power_off()
569 list_for_each_entry(link, &genpd->slave_links, slave_node) { in genpd_power_off()
587 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) in genpd_power_on() argument
592 if (genpd_status_on(genpd)) in genpd_power_on()
600 list_for_each_entry(link, &genpd->slave_links, slave_node) { in genpd_power_on()
615 ret = _genpd_power_on(genpd, true); in genpd_power_on()
619 genpd->status = GPD_STATE_ACTIVE; in genpd_power_on()
620 genpd_update_accounting(genpd); in genpd_power_on()
626 &genpd->slave_links, in genpd_power_on()
647 struct generic_pm_domain *genpd; in genpd_dev_pm_qos_notifier() local
656 genpd = dev_to_genpd(dev); in genpd_dev_pm_qos_notifier()
658 genpd = ERR_PTR(-ENODATA); in genpd_dev_pm_qos_notifier()
663 if (!IS_ERR(genpd)) { in genpd_dev_pm_qos_notifier()
664 genpd_lock(genpd); in genpd_dev_pm_qos_notifier()
665 genpd->max_off_time_changed = true; in genpd_dev_pm_qos_notifier()
666 genpd_unlock(genpd); in genpd_dev_pm_qos_notifier()
683 struct generic_pm_domain *genpd; in genpd_power_off_work_fn() local
685 genpd = container_of(work, struct generic_pm_domain, power_off_work); in genpd_power_off_work_fn()
687 genpd_lock(genpd); in genpd_power_off_work_fn()
688 genpd_power_off(genpd, false, 0); in genpd_power_off_work_fn()
689 genpd_unlock(genpd); in genpd_power_off_work_fn()
748 struct generic_pm_domain *genpd; in genpd_runtime_suspend() local
758 genpd = dev_to_genpd(dev); in genpd_runtime_suspend()
759 if (IS_ERR(genpd)) in genpd_runtime_suspend()
768 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL; in genpd_runtime_suspend()
781 ret = genpd_stop_dev(genpd, dev); in genpd_runtime_suspend()
794 genpd->max_off_time_changed = true; in genpd_runtime_suspend()
803 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) in genpd_runtime_suspend()
806 genpd_lock(genpd); in genpd_runtime_suspend()
807 genpd_power_off(genpd, true, 0); in genpd_runtime_suspend()
808 genpd_unlock(genpd); in genpd_runtime_suspend()
823 struct generic_pm_domain *genpd; in genpd_runtime_resume() local
833 genpd = dev_to_genpd(dev); in genpd_runtime_resume()
834 if (IS_ERR(genpd)) in genpd_runtime_resume()
841 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) { in genpd_runtime_resume()
846 genpd_lock(genpd); in genpd_runtime_resume()
847 ret = genpd_power_on(genpd, 0); in genpd_runtime_resume()
848 genpd_unlock(genpd); in genpd_runtime_resume()
859 ret = genpd_start_dev(genpd, dev); in genpd_runtime_resume()
874 genpd->max_off_time_changed = true; in genpd_runtime_resume()
882 genpd_stop_dev(genpd, dev); in genpd_runtime_resume()
885 (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) { in genpd_runtime_resume()
886 genpd_lock(genpd); in genpd_runtime_resume()
887 genpd_power_off(genpd, true, 0); in genpd_runtime_resume()
888 genpd_unlock(genpd); in genpd_runtime_resume()
907 struct generic_pm_domain *genpd; in genpd_power_off_unused() local
916 list_for_each_entry(genpd, &gpd_list, gpd_list_node) in genpd_power_off_unused()
917 genpd_queue_power_off_work(genpd); in genpd_power_off_unused()
927 static bool genpd_present(const struct generic_pm_domain *genpd) in genpd_present() argument
931 if (IS_ERR_OR_NULL(genpd)) in genpd_present()
935 if (gpd == genpd) in genpd_present()
958 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock, in genpd_sync_power_off() argument
963 if (!genpd_status_on(genpd) || genpd_is_always_on(genpd)) in genpd_sync_power_off()
966 if (genpd->suspended_count != genpd->device_count in genpd_sync_power_off()
967 || atomic_read(&genpd->sd_count) > 0) in genpd_sync_power_off()
971 genpd->state_idx = genpd->state_count - 1; in genpd_sync_power_off()
972 if (_genpd_power_off(genpd, false)) in genpd_sync_power_off()
975 genpd->status = GPD_STATE_POWER_OFF; in genpd_sync_power_off()
977 list_for_each_entry(link, &genpd->slave_links, slave_node) { in genpd_sync_power_off()
1000 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock, in genpd_sync_power_on() argument
1005 if (genpd_status_on(genpd)) in genpd_sync_power_on()
1008 list_for_each_entry(link, &genpd->slave_links, slave_node) { in genpd_sync_power_on()
1020 _genpd_power_on(genpd, false); in genpd_sync_power_on()
1022 genpd->status = GPD_STATE_ACTIVE; in genpd_sync_power_on()
1042 const struct generic_pm_domain *genpd) in resume_needed() argument
1049 active_wakeup = genpd_is_active_wakeup(genpd); in resume_needed()
1064 struct generic_pm_domain *genpd; in genpd_prepare() local
1069 genpd = dev_to_genpd(dev); in genpd_prepare()
1070 if (IS_ERR(genpd)) in genpd_prepare()
1078 if (resume_needed(dev, genpd)) in genpd_prepare()
1081 genpd_lock(genpd); in genpd_prepare()
1083 if (genpd->prepared_count++ == 0) in genpd_prepare()
1084 genpd->suspended_count = 0; in genpd_prepare()
1086 genpd_unlock(genpd); in genpd_prepare()
1090 genpd_lock(genpd); in genpd_prepare()
1092 genpd->prepared_count--; in genpd_prepare()
1094 genpd_unlock(genpd); in genpd_prepare()
1112 struct generic_pm_domain *genpd; in genpd_finish_suspend() local
1115 genpd = dev_to_genpd(dev); in genpd_finish_suspend()
1116 if (IS_ERR(genpd)) in genpd_finish_suspend()
1126 if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd)) in genpd_finish_suspend()
1129 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_finish_suspend()
1131 ret = genpd_stop_dev(genpd, dev); in genpd_finish_suspend()
1141 genpd_lock(genpd); in genpd_finish_suspend()
1142 genpd->suspended_count++; in genpd_finish_suspend()
1143 genpd_sync_power_off(genpd, true, 0); in genpd_finish_suspend()
1144 genpd_unlock(genpd); in genpd_finish_suspend()
1171 struct generic_pm_domain *genpd; in genpd_resume_noirq() local
1176 genpd = dev_to_genpd(dev); in genpd_resume_noirq()
1177 if (IS_ERR(genpd)) in genpd_resume_noirq()
1180 if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd)) in genpd_resume_noirq()
1183 genpd_lock(genpd); in genpd_resume_noirq()
1184 genpd_sync_power_on(genpd, true, 0); in genpd_resume_noirq()
1185 genpd->suspended_count--; in genpd_resume_noirq()
1186 genpd_unlock(genpd); in genpd_resume_noirq()
1188 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_resume_noirq()
1190 ret = genpd_start_dev(genpd, dev); in genpd_resume_noirq()
1209 const struct generic_pm_domain *genpd; in genpd_freeze_noirq() local
1214 genpd = dev_to_genpd(dev); in genpd_freeze_noirq()
1215 if (IS_ERR(genpd)) in genpd_freeze_noirq()
1222 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_freeze_noirq()
1224 ret = genpd_stop_dev(genpd, dev); in genpd_freeze_noirq()
1238 const struct generic_pm_domain *genpd; in genpd_thaw_noirq() local
1243 genpd = dev_to_genpd(dev); in genpd_thaw_noirq()
1244 if (IS_ERR(genpd)) in genpd_thaw_noirq()
1247 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_thaw_noirq()
1249 ret = genpd_start_dev(genpd, dev); in genpd_thaw_noirq()
1281 struct generic_pm_domain *genpd; in genpd_restore_noirq() local
1286 genpd = dev_to_genpd(dev); in genpd_restore_noirq()
1287 if (IS_ERR(genpd)) in genpd_restore_noirq()
1294 genpd_lock(genpd); in genpd_restore_noirq()
1295 if (genpd->suspended_count++ == 0) in genpd_restore_noirq()
1301 genpd->status = GPD_STATE_POWER_OFF; in genpd_restore_noirq()
1303 genpd_sync_power_on(genpd, true, 0); in genpd_restore_noirq()
1304 genpd_unlock(genpd); in genpd_restore_noirq()
1306 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_restore_noirq()
1308 ret = genpd_start_dev(genpd, dev); in genpd_restore_noirq()
1327 struct generic_pm_domain *genpd; in genpd_complete() local
1331 genpd = dev_to_genpd(dev); in genpd_complete()
1332 if (IS_ERR(genpd)) in genpd_complete()
1337 genpd_lock(genpd); in genpd_complete()
1339 genpd->prepared_count--; in genpd_complete()
1340 if (!genpd->prepared_count) in genpd_complete()
1341 genpd_queue_power_off_work(genpd); in genpd_complete()
1343 genpd_unlock(genpd); in genpd_complete()
1355 struct generic_pm_domain *genpd; in genpd_syscore_switch() local
1357 genpd = dev_to_genpd(dev); in genpd_syscore_switch()
1358 if (!genpd_present(genpd)) in genpd_syscore_switch()
1362 genpd->suspended_count++; in genpd_syscore_switch()
1363 genpd_sync_power_off(genpd, false, 0); in genpd_syscore_switch()
1365 genpd_sync_power_on(genpd, false, 0); in genpd_syscore_switch()
1366 genpd->suspended_count--; in genpd_syscore_switch()
1449 static void genpd_update_cpumask(struct generic_pm_domain *genpd, in genpd_update_cpumask() argument
1454 if (!genpd_is_cpu_domain(genpd)) in genpd_update_cpumask()
1457 list_for_each_entry(link, &genpd->slave_links, slave_node) { in genpd_update_cpumask()
1466 cpumask_set_cpu(cpu, genpd->cpus); in genpd_update_cpumask()
1468 cpumask_clear_cpu(cpu, genpd->cpus); in genpd_update_cpumask()
1471 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu) in genpd_set_cpumask() argument
1474 genpd_update_cpumask(genpd, cpu, true, 0); in genpd_set_cpumask()
1477 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu) in genpd_clear_cpumask() argument
1480 genpd_update_cpumask(genpd, cpu, false, 0); in genpd_clear_cpumask()
1483 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev) in genpd_get_cpu() argument
1487 if (!genpd_is_cpu_domain(genpd)) in genpd_get_cpu()
1498 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, in genpd_add_device() argument
1506 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) in genpd_add_device()
1513 gpd_data->cpu = genpd_get_cpu(genpd, base_dev); in genpd_add_device()
1515 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; in genpd_add_device()
1519 genpd_lock(genpd); in genpd_add_device()
1521 genpd_set_cpumask(genpd, gpd_data->cpu); in genpd_add_device()
1522 dev_pm_domain_set(dev, &genpd->domain); in genpd_add_device()
1524 genpd->device_count++; in genpd_add_device()
1525 genpd->max_off_time_changed = true; in genpd_add_device()
1527 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); in genpd_add_device()
1529 genpd_unlock(genpd); in genpd_add_device()
1545 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) in pm_genpd_add_device() argument
1550 ret = genpd_add_device(genpd, dev, dev); in pm_genpd_add_device()
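pm_genpd_add_device() is the non-DT way to attach a device to a domain (the OF path goes through genpd_get_from_provider()/of_genpd_add_device() further down). A hedged sketch reusing the hypothetical my_pd from above:

	static int my_board_attach(struct platform_device *pdev)
	{
		int ret;

		ret = pm_genpd_add_device(&my_pd, &pdev->dev);
		if (ret)
			return ret;

		/* detach again later with pm_genpd_remove_device(&pdev->dev) */
		return 0;
	}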
1557 static int genpd_remove_device(struct generic_pm_domain *genpd, in genpd_remove_device() argument
1571 genpd_lock(genpd); in genpd_remove_device()
1573 if (genpd->prepared_count > 0) { in genpd_remove_device()
1578 genpd->device_count--; in genpd_remove_device()
1579 genpd->max_off_time_changed = true; in genpd_remove_device()
1581 genpd_clear_cpumask(genpd, gpd_data->cpu); in genpd_remove_device()
1586 genpd_unlock(genpd); in genpd_remove_device()
1588 if (genpd->detach_dev) in genpd_remove_device()
1589 genpd->detach_dev(genpd, dev); in genpd_remove_device()
1596 genpd_unlock(genpd); in genpd_remove_device()
1608 struct generic_pm_domain *genpd = dev_to_genpd_safe(dev); in pm_genpd_remove_device() local
1610 if (!genpd) in pm_genpd_remove_device()
1613 return genpd_remove_device(genpd, dev); in pm_genpd_remove_device()
1617 static int genpd_add_subdomain(struct generic_pm_domain *genpd, in genpd_add_subdomain() argument
1623 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) in genpd_add_subdomain()
1624 || genpd == subdomain) in genpd_add_subdomain()
1632 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) { in genpd_add_subdomain()
1634 genpd->name, subdomain->name); in genpd_add_subdomain()
1643 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); in genpd_add_subdomain()
1645 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) { in genpd_add_subdomain()
1650 list_for_each_entry(itr, &genpd->master_links, master_node) { in genpd_add_subdomain()
1651 if (itr->slave == subdomain && itr->master == genpd) { in genpd_add_subdomain()
1657 link->master = genpd; in genpd_add_subdomain()
1658 list_add_tail(&link->master_node, &genpd->master_links); in genpd_add_subdomain()
1662 genpd_sd_counter_inc(genpd); in genpd_add_subdomain()
1665 genpd_unlock(genpd); in genpd_add_subdomain()
1677 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, in pm_genpd_add_subdomain() argument
1683 ret = genpd_add_subdomain(genpd, subdomain); in pm_genpd_add_subdomain()
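pm_genpd_add_subdomain() builds the domain hierarchy; the new link keeps the parent's sd_count raised while the subdomain is powered on, which is what genpd_power_off() checks before turning a parent off. A hedged sketch with two hypothetical, already-initialized domains:

	static int my_soc_link_domains(void)
	{
		int ret;

		ret = pm_genpd_add_subdomain(&soc_top_pd, &gpu_pd);
		if (ret)
			pr_err("failed to add %s under %s: %d\n",
			       gpu_pd.name, soc_top_pd.name, ret);

		return ret;
	}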
1695 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, in pm_genpd_remove_subdomain() argument
1701 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) in pm_genpd_remove_subdomain()
1705 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); in pm_genpd_remove_subdomain()
1709 genpd->name, subdomain->name); in pm_genpd_remove_subdomain()
1714 list_for_each_entry_safe(link, l, &genpd->master_links, master_node) { in pm_genpd_remove_subdomain()
1722 genpd_sd_counter_dec(genpd); in pm_genpd_remove_subdomain()
1729 genpd_unlock(genpd); in pm_genpd_remove_subdomain()
1742 static int genpd_set_default_power_state(struct generic_pm_domain *genpd) in genpd_set_default_power_state() argument
1750 genpd->states = state; in genpd_set_default_power_state()
1751 genpd->state_count = 1; in genpd_set_default_power_state()
1752 genpd->free_states = genpd_free_default_power_state; in genpd_set_default_power_state()
1757 static void genpd_lock_init(struct generic_pm_domain *genpd) in genpd_lock_init() argument
1759 if (genpd->flags & GENPD_FLAG_IRQ_SAFE) { in genpd_lock_init()
1760 spin_lock_init(&genpd->slock); in genpd_lock_init()
1761 genpd->lock_ops = &genpd_spin_ops; in genpd_lock_init()
1763 mutex_init(&genpd->mlock); in genpd_lock_init()
1764 genpd->lock_ops = &genpd_mtx_ops; in genpd_lock_init()
1776 int pm_genpd_init(struct generic_pm_domain *genpd, in pm_genpd_init() argument
1781 if (IS_ERR_OR_NULL(genpd)) in pm_genpd_init()
1784 INIT_LIST_HEAD(&genpd->master_links); in pm_genpd_init()
1785 INIT_LIST_HEAD(&genpd->slave_links); in pm_genpd_init()
1786 INIT_LIST_HEAD(&genpd->dev_list); in pm_genpd_init()
1787 genpd_lock_init(genpd); in pm_genpd_init()
1788 genpd->gov = gov; in pm_genpd_init()
1789 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); in pm_genpd_init()
1790 atomic_set(&genpd->sd_count, 0); in pm_genpd_init()
1791 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; in pm_genpd_init()
1792 genpd->device_count = 0; in pm_genpd_init()
1793 genpd->max_off_time_ns = -1; in pm_genpd_init()
1794 genpd->max_off_time_changed = true; in pm_genpd_init()
1795 genpd->provider = NULL; in pm_genpd_init()
1796 genpd->has_provider = false; in pm_genpd_init()
1797 genpd->accounting_time = ktime_get(); in pm_genpd_init()
1798 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; in pm_genpd_init()
1799 genpd->domain.ops.runtime_resume = genpd_runtime_resume; in pm_genpd_init()
1800 genpd->domain.ops.prepare = genpd_prepare; in pm_genpd_init()
1801 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq; in pm_genpd_init()
1802 genpd->domain.ops.resume_noirq = genpd_resume_noirq; in pm_genpd_init()
1803 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq; in pm_genpd_init()
1804 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq; in pm_genpd_init()
1805 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq; in pm_genpd_init()
1806 genpd->domain.ops.restore_noirq = genpd_restore_noirq; in pm_genpd_init()
1807 genpd->domain.ops.complete = genpd_complete; in pm_genpd_init()
1809 if (genpd->flags & GENPD_FLAG_PM_CLK) { in pm_genpd_init()
1810 genpd->dev_ops.stop = pm_clk_suspend; in pm_genpd_init()
1811 genpd->dev_ops.start = pm_clk_resume; in pm_genpd_init()
1815 if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) && in pm_genpd_init()
1816 !genpd_status_on(genpd)) in pm_genpd_init()
1819 if (genpd_is_cpu_domain(genpd) && in pm_genpd_init()
1820 !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL)) in pm_genpd_init()
1824 if (genpd->state_count == 0) { in pm_genpd_init()
1825 ret = genpd_set_default_power_state(genpd); in pm_genpd_init()
1827 if (genpd_is_cpu_domain(genpd)) in pm_genpd_init()
1828 free_cpumask_var(genpd->cpus); in pm_genpd_init()
1831 } else if (!gov && genpd->state_count > 1) { in pm_genpd_init()
1832 pr_warn("%s: no governor for states\n", genpd->name); in pm_genpd_init()
1835 device_initialize(&genpd->dev); in pm_genpd_init()
1836 dev_set_name(&genpd->dev, "%s", genpd->name); in pm_genpd_init()
1839 list_add(&genpd->gpd_list_node, &gpd_list); in pm_genpd_init()
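pm_genpd_init() wires up the runtime-PM and system-sleep callbacks, initializes the lock, and adds the domain to gpd_list. A hedged end-to-end provider sketch (my_pd_power_on/my_pd_power_off and the probe function are hypothetical; of_genpd_add_provider_simple() appears further down in the listing):

	static int my_pd_power_on(struct generic_pm_domain *domain)
	{
		/* enable the power switch / regulator for this island */
		return 0;
	}

	static int my_pd_power_off(struct generic_pm_domain *domain)
	{
		/* and turn it off again */
		return 0;
	}

	static int my_pd_probe(struct platform_device *pdev)
	{
		int ret;

		my_pd.power_on = my_pd_power_on;
		my_pd.power_off = my_pd_power_off;

		/* register the domain powered on, with no governor */
		ret = pm_genpd_init(&my_pd, NULL, false);
		if (ret)
			return ret;

		return of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
	}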
1846 static int genpd_remove(struct generic_pm_domain *genpd) in genpd_remove() argument
1850 if (IS_ERR_OR_NULL(genpd)) in genpd_remove()
1853 genpd_lock(genpd); in genpd_remove()
1855 if (genpd->has_provider) { in genpd_remove()
1856 genpd_unlock(genpd); in genpd_remove()
1857 pr_err("Provider present, unable to remove %s\n", genpd->name); in genpd_remove()
1861 if (!list_empty(&genpd->master_links) || genpd->device_count) { in genpd_remove()
1862 genpd_unlock(genpd); in genpd_remove()
1863 pr_err("%s: unable to remove %s\n", __func__, genpd->name); in genpd_remove()
1867 list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) { in genpd_remove()
1873 list_del(&genpd->gpd_list_node); in genpd_remove()
1874 genpd_unlock(genpd); in genpd_remove()
1875 cancel_work_sync(&genpd->power_off_work); in genpd_remove()
1876 if (genpd_is_cpu_domain(genpd)) in genpd_remove()
1877 free_cpumask_var(genpd->cpus); in genpd_remove()
1878 if (genpd->free_states) in genpd_remove()
1879 genpd->free_states(genpd->states, genpd->state_count); in genpd_remove()
1881 pr_debug("%s: removed %s\n", __func__, genpd->name); in genpd_remove()
1899 int pm_genpd_remove(struct generic_pm_domain *genpd) in pm_genpd_remove() argument
1904 ret = genpd_remove(genpd); in pm_genpd_remove()
2029 struct generic_pm_domain *genpd) in of_genpd_add_provider_simple() argument
2033 if (!np || !genpd) in of_genpd_add_provider_simple()
2038 if (!genpd_present(genpd)) in of_genpd_add_provider_simple()
2041 genpd->dev.of_node = np; in of_genpd_add_provider_simple()
2044 if (genpd->set_performance_state) { in of_genpd_add_provider_simple()
2045 ret = dev_pm_opp_of_add_table(&genpd->dev); in of_genpd_add_provider_simple()
2047 dev_err(&genpd->dev, "Failed to add OPP table: %d\n", in of_genpd_add_provider_simple()
2056 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); in of_genpd_add_provider_simple()
2057 WARN_ON(!genpd->opp_table); in of_genpd_add_provider_simple()
2060 ret = genpd_add_provider(np, genpd_xlate_simple, genpd); in of_genpd_add_provider_simple()
2062 if (genpd->set_performance_state) { in of_genpd_add_provider_simple()
2063 dev_pm_opp_put_opp_table(genpd->opp_table); in of_genpd_add_provider_simple()
2064 dev_pm_opp_of_remove_table(&genpd->dev); in of_genpd_add_provider_simple()
2070 genpd->provider = &np->fwnode; in of_genpd_add_provider_simple()
2071 genpd->has_provider = true; in of_genpd_add_provider_simple()
2088 struct generic_pm_domain *genpd; in of_genpd_add_provider_onecell() local
2101 genpd = data->domains[i]; in of_genpd_add_provider_onecell()
2103 if (!genpd) in of_genpd_add_provider_onecell()
2105 if (!genpd_present(genpd)) in of_genpd_add_provider_onecell()
2108 genpd->dev.of_node = np; in of_genpd_add_provider_onecell()
2111 if (genpd->set_performance_state) { in of_genpd_add_provider_onecell()
2112 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i); in of_genpd_add_provider_onecell()
2114 dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n", in of_genpd_add_provider_onecell()
2123 genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i); in of_genpd_add_provider_onecell()
2124 WARN_ON(!genpd->opp_table); in of_genpd_add_provider_onecell()
2127 genpd->provider = &np->fwnode; in of_genpd_add_provider_onecell()
2128 genpd->has_provider = true; in of_genpd_add_provider_onecell()
2141 genpd = data->domains[i]; in of_genpd_add_provider_onecell()
2143 if (!genpd) in of_genpd_add_provider_onecell()
2146 genpd->provider = NULL; in of_genpd_add_provider_onecell()
2147 genpd->has_provider = false; in of_genpd_add_provider_onecell()
2149 if (genpd->set_performance_state) { in of_genpd_add_provider_onecell()
2150 dev_pm_opp_put_opp_table(genpd->opp_table); in of_genpd_add_provider_onecell()
2151 dev_pm_opp_of_remove_table(&genpd->dev); in of_genpd_add_provider_onecell()
2216 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); in genpd_get_from_provider() local
2227 genpd = provider->xlate(genpdspec, provider->data); in genpd_get_from_provider()
2228 if (!IS_ERR(genpd)) in genpd_get_from_provider()
2234 return genpd; in genpd_get_from_provider()
2247 struct generic_pm_domain *genpd; in of_genpd_add_device() local
2252 genpd = genpd_get_from_provider(genpdspec); in of_genpd_add_device()
2253 if (IS_ERR(genpd)) { in of_genpd_add_device()
2254 ret = PTR_ERR(genpd); in of_genpd_add_device()
2258 ret = genpd_add_device(genpd, dev, dev); in of_genpd_add_device()
2320 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT); in of_genpd_remove_last() local
2330 genpd = ret ? ERR_PTR(ret) : gpd; in of_genpd_remove_last()
2336 return genpd; in of_genpd_remove_last()
2701 struct generic_pm_domain *genpd = NULL; in pm_genpd_opp_to_performance_state() local
2704 genpd = container_of(genpd_dev, struct generic_pm_domain, dev); in pm_genpd_opp_to_performance_state()
2706 if (unlikely(!genpd->opp_to_performance_state)) in pm_genpd_opp_to_performance_state()
2709 genpd_lock(genpd); in pm_genpd_opp_to_performance_state()
2710 state = genpd->opp_to_performance_state(genpd, opp); in pm_genpd_opp_to_performance_state()
2711 genpd_unlock(genpd); in pm_genpd_opp_to_performance_state()
2764 struct generic_pm_domain *genpd) in genpd_summary_one() argument
2776 ret = genpd_lock_interruptible(genpd); in genpd_summary_one()
2780 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) in genpd_summary_one()
2782 if (!genpd_status_on(genpd)) in genpd_summary_one()
2784 status_lookup[genpd->status], genpd->state_idx); in genpd_summary_one()
2787 status_lookup[genpd->status]); in genpd_summary_one()
2788 seq_printf(s, "%-30s %-15s ", genpd->name, state); in genpd_summary_one()
2795 list_for_each_entry(link, &genpd->master_links, master_node) { in genpd_summary_one()
2797 if (!list_is_last(&link->master_node, &genpd->master_links)) in genpd_summary_one()
2801 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { in genpd_summary_one()
2803 genpd_is_irq_safe(genpd) ? in genpd_summary_one()
2815 genpd_unlock(genpd); in genpd_summary_one()
2822 struct generic_pm_domain *genpd; in summary_show() local
2833 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { in summary_show()
2834 ret = genpd_summary_one(s, genpd); in summary_show()
2850 struct generic_pm_domain *genpd = s->private; in status_show() local
2853 ret = genpd_lock_interruptible(genpd); in status_show()
2857 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) in status_show()
2860 if (genpd->status == GPD_STATE_POWER_OFF) in status_show()
2861 seq_printf(s, "%s-%u\n", status_lookup[genpd->status], in status_show()
2862 genpd->state_idx); in status_show()
2864 seq_printf(s, "%s\n", status_lookup[genpd->status]); in status_show()
2866 genpd_unlock(genpd); in status_show()
2872 struct generic_pm_domain *genpd = s->private; in sub_domains_show() local
2876 ret = genpd_lock_interruptible(genpd); in sub_domains_show()
2880 list_for_each_entry(link, &genpd->master_links, master_node) in sub_domains_show()
2883 genpd_unlock(genpd); in sub_domains_show()
2889 struct generic_pm_domain *genpd = s->private; in idle_states_show() local
2893 ret = genpd_lock_interruptible(genpd); in idle_states_show()
2899 for (i = 0; i < genpd->state_count; i++) { in idle_states_show()
2903 if ((genpd->status == GPD_STATE_POWER_OFF) && in idle_states_show()
2904 (genpd->state_idx == i)) in idle_states_show()
2905 delta = ktime_sub(ktime_get(), genpd->accounting_time); in idle_states_show()
2908 ktime_add(genpd->states[i].idle_time, delta)); in idle_states_show()
2912 genpd_unlock(genpd); in idle_states_show()
2918 struct generic_pm_domain *genpd = s->private; in active_time_show() local
2922 ret = genpd_lock_interruptible(genpd); in active_time_show()
2926 if (genpd->status == GPD_STATE_ACTIVE) in active_time_show()
2927 delta = ktime_sub(ktime_get(), genpd->accounting_time); in active_time_show()
2930 ktime_add(genpd->on_time, delta))); in active_time_show()
2932 genpd_unlock(genpd); in active_time_show()
2938 struct generic_pm_domain *genpd = s->private; in total_idle_time_show() local
2943 ret = genpd_lock_interruptible(genpd); in total_idle_time_show()
2947 for (i = 0; i < genpd->state_count; i++) { in total_idle_time_show()
2949 if ((genpd->status == GPD_STATE_POWER_OFF) && in total_idle_time_show()
2950 (genpd->state_idx == i)) in total_idle_time_show()
2951 delta = ktime_sub(ktime_get(), genpd->accounting_time); in total_idle_time_show()
2953 total = ktime_add(total, genpd->states[i].idle_time); in total_idle_time_show()
2959 genpd_unlock(genpd); in total_idle_time_show()
2966 struct generic_pm_domain *genpd = s->private; in devices_show() local
2971 ret = genpd_lock_interruptible(genpd); in devices_show()
2975 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { in devices_show()
2977 genpd_is_irq_safe(genpd) ? in devices_show()
2986 genpd_unlock(genpd); in devices_show()
2992 struct generic_pm_domain *genpd = s->private; in perf_state_show() local
2994 if (genpd_lock_interruptible(genpd)) in perf_state_show()
2997 seq_printf(s, "%u\n", genpd->performance_state); in perf_state_show()
2999 genpd_unlock(genpd); in perf_state_show()
3015 struct generic_pm_domain *genpd; in genpd_debug_init() local
3022 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { in genpd_debug_init()
3023 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); in genpd_debug_init()
3026 d, genpd, &status_fops); in genpd_debug_init()
3028 d, genpd, &sub_domains_fops); in genpd_debug_init()
3030 d, genpd, &idle_states_fops); in genpd_debug_init()
3032 d, genpd, &active_time_fops); in genpd_debug_init()
3034 d, genpd, &total_idle_time_fops); in genpd_debug_init()
3036 d, genpd, &devices_fops); in genpd_debug_init()
3037 if (genpd->set_performance_state) in genpd_debug_init()
3039 d, genpd, &perf_state_fops); in genpd_debug_init()
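With CONFIG_DEBUG_FS enabled, the per-domain files created here typically show up under /sys/kernel/debug/pm_genpd/<domain name>/, alongside the top-level summary of every domain on gpd_list produced by summary_show() above.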