Lines matching full:core in drivers/clk/clk.c (Linux common clock framework)
50 struct clk_core *core; member
98 struct clk_core *core; member
109 static int clk_pm_runtime_get(struct clk_core *core) in clk_pm_runtime_get() argument
111 if (!core->rpm_enabled) in clk_pm_runtime_get()
114 return pm_runtime_resume_and_get(core->dev); in clk_pm_runtime_get()
117 static void clk_pm_runtime_put(struct clk_core *core) in clk_pm_runtime_put() argument
119 if (!core->rpm_enabled) in clk_pm_runtime_put()
122 pm_runtime_put_sync(core->dev); in clk_pm_runtime_put()
194 static bool clk_core_rate_is_protected(struct clk_core *core) in clk_core_rate_is_protected() argument
196 return core->protect_count; in clk_core_rate_is_protected()
199 static bool clk_core_is_prepared(struct clk_core *core) in clk_core_is_prepared() argument
207 if (!core->ops->is_prepared) in clk_core_is_prepared()
208 return core->prepare_count; in clk_core_is_prepared()
210 if (!clk_pm_runtime_get(core)) { in clk_core_is_prepared()
211 ret = core->ops->is_prepared(core->hw); in clk_core_is_prepared()
212 clk_pm_runtime_put(core); in clk_core_is_prepared()
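These pm-runtime wrappers bracket every ops callback that touches hardware, so a provider callback can assume its device is already resumed. A minimal provider-side sketch of an .is_prepared hook, assuming a hypothetical foo_clk wrapper, register offset and bit (none of the foo_* names come from this listing):

    #include <linux/bits.h>
    #include <linux/clk-provider.h>
    #include <linux/regmap.h>

    /* hypothetical provider wrapper, reused by the later sketches */
    struct foo_clk {
            struct clk_hw hw;
            struct regmap *regmap;
    };

    #define FOO_CLK_CTRL    0x0             /* hypothetical register */
    #define FOO_CLK_EN      BIT(0)          /* hypothetical enable bit */

    static int foo_clk_is_prepared(struct clk_hw *hw)
    {
            struct foo_clk *foo = container_of(hw, struct foo_clk, hw);
            unsigned int val;

            /* the core resumed the provider device via clk_pm_runtime_get() first */
            regmap_read(foo->regmap, FOO_CLK_CTRL, &val);

            return !!(val & FOO_CLK_EN);
    }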
218 static bool clk_core_is_enabled(struct clk_core *core) in clk_core_is_enabled() argument
226 if (!core->ops->is_enabled) in clk_core_is_enabled()
227 return core->enable_count; in clk_core_is_enabled()
239 if (core->rpm_enabled) { in clk_core_is_enabled()
240 pm_runtime_get_noresume(core->dev); in clk_core_is_enabled()
241 if (!pm_runtime_active(core->dev)) { in clk_core_is_enabled()
247 ret = core->ops->is_enabled(core->hw); in clk_core_is_enabled()
249 if (core->rpm_enabled) in clk_core_is_enabled()
250 pm_runtime_put(core->dev); in clk_core_is_enabled()
259 return !clk ? NULL : clk->core->name; in __clk_get_name()
265 return hw->core->name; in clk_hw_get_name()
271 return !clk ? NULL : clk->core->hw; in __clk_get_hw()
277 return hw->core->num_parents; in clk_hw_get_num_parents()
283 return hw->core->parent ? hw->core->parent->hw : NULL; in clk_hw_get_parent()
288 struct clk_core *core) in __clk_lookup_subtree() argument
293 if (!strcmp(core->name, name)) in __clk_lookup_subtree()
294 return core; in __clk_lookup_subtree()
296 hlist_for_each_entry(child, &core->children, child_node) { in __clk_lookup_subtree()
351 * @core: clk to find parent of
385 static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) in clk_core_get() argument
387 const char *name = core->parents[p_index].fw_name; in clk_core_get()
388 int index = core->parents[p_index].index; in clk_core_get()
390 struct device *dev = core->dev; in clk_core_get()
392 struct device_node *np = core->of_node; in clk_core_get()
410 return hw->core; in clk_core_get()
413 static void clk_core_fill_parent_index(struct clk_core *core, u8 index) in clk_core_fill_parent_index() argument
415 struct clk_parent_map *entry = &core->parents[index]; in clk_core_fill_parent_index()
419 parent = entry->hw->core; in clk_core_fill_parent_index()
421 parent = clk_core_get(core, index); in clk_core_fill_parent_index()
436 entry->core = parent; in clk_core_fill_parent_index()
439 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, in clk_core_get_parent_by_index() argument
442 if (!core || index >= core->num_parents || !core->parents) in clk_core_get_parent_by_index()
445 if (!core->parents[index].core) in clk_core_get_parent_by_index()
446 clk_core_fill_parent_index(core, index); in clk_core_get_parent_by_index()
448 return core->parents[index].core; in clk_core_get_parent_by_index()
456 parent = clk_core_get_parent_by_index(hw->core, index); in clk_hw_get_parent_by_index()
464 return !clk ? 0 : clk->core->enable_count; in __clk_get_enable_count()
467 static unsigned long clk_core_get_rate_nolock(struct clk_core *core) in clk_core_get_rate_nolock() argument
469 if (!core) in clk_core_get_rate_nolock()
472 if (!core->num_parents || core->parent) in clk_core_get_rate_nolock()
473 return core->rate; in clk_core_get_rate_nolock()
485 return clk_core_get_rate_nolock(hw->core); in clk_hw_get_rate()
489 static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core) in clk_core_get_accuracy_no_lock() argument
491 if (!core) in clk_core_get_accuracy_no_lock()
494 return core->accuracy; in clk_core_get_accuracy_no_lock()
499 return hw->core->flags; in clk_hw_get_flags()
505 return clk_core_is_prepared(hw->core); in clk_hw_is_prepared()
511 return clk_core_rate_is_protected(hw->core); in clk_hw_rate_is_protected()
517 return clk_core_is_enabled(hw->core); in clk_hw_is_enabled()
526 return clk_core_is_enabled(clk->core); in __clk_is_enabled()
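The clk_hw_* accessors in this stretch (name, hw pointer, parent count, parent, rate, flags, prepared/enabled state) are the read-only view a provider gets of its own clk_core. A hedged sketch of a mux .get_parent callback leaning on them, reusing the hypothetical foo_clk wrapper from the earlier sketch:

    #define FOO_MUX_CTRL            0x4     /* hypothetical register */
    #define FOO_MUX_SEL_MASK        0x3     /* hypothetical selector field */

    static u8 foo_mux_get_parent(struct clk_hw *hw)
    {
            struct foo_clk *foo = container_of(hw, struct foo_clk, hw);
            unsigned int val;

            regmap_read(foo->regmap, FOO_MUX_CTRL, &val);
            val &= FOO_MUX_SEL_MASK;

            /* never report an index the framework does not know about */
            if (val >= clk_hw_get_num_parents(hw))
                    return 0;

            return val;
    }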
539 static void clk_core_init_rate_req(struct clk_core * const core,
543 static int clk_core_round_rate_nolock(struct clk_core *core,
546 static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent) in clk_core_has_parent() argument
552 if (core->parent == parent) in clk_core_has_parent()
555 for (i = 0; i < core->num_parents; i++) { in clk_core_has_parent()
556 tmp = clk_core_get_parent_by_index(core, i); in clk_core_has_parent()
568 clk_core_forward_rate_req(struct clk_core *core, in clk_core_forward_rate_req() argument
574 if (WARN_ON(!clk_core_has_parent(core, parent))) in clk_core_forward_rate_req()
590 struct clk_core *core = hw->core, *parent, *best_parent = NULL; in clk_mux_determine_rate_flags() local
595 if (core->flags & CLK_SET_RATE_NO_REPARENT) { in clk_mux_determine_rate_flags()
596 parent = core->parent; in clk_mux_determine_rate_flags()
597 if (core->flags & CLK_SET_RATE_PARENT) { in clk_mux_determine_rate_flags()
605 clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate); in clk_mux_determine_rate_flags()
614 best = clk_core_get_rate_nolock(core); in clk_mux_determine_rate_flags()
621 num_parents = core->num_parents; in clk_mux_determine_rate_flags()
625 parent = clk_core_get_parent_by_index(core, i); in clk_mux_determine_rate_flags()
629 if (core->flags & CLK_SET_RATE_PARENT) { in clk_mux_determine_rate_flags()
632 clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate); in clk_mux_determine_rate_flags()
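clk_mux_determine_rate_flags() is the engine behind the generic mux rate helpers; providers normally do not call it directly but plug __clk_mux_determine_rate() (a thin wrapper around it) into their ops and let CLK_SET_RATE_PARENT decide whether reparenting is allowed. A hedged sketch, with foo_mux_get_parent/foo_mux_set_parent as hypothetical callbacks:

    static const struct clk_ops foo_mux_ops = {
            .get_parent     = foo_mux_get_parent,
            .set_parent     = foo_mux_set_parent,
            .determine_rate = __clk_mux_determine_rate,
    };

    /* registered with init.flags = CLK_SET_RATE_PARENT so rate requests may
     * be forwarded into whichever parent gives the best match */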
664 struct clk_core *core = clk_core_lookup(name); in __clk_lookup() local
666 return !core ? NULL : core->hw->clk; in __clk_lookup()
669 static void clk_core_get_boundaries(struct clk_core *core, in clk_core_get_boundaries() argument
677 *min_rate = core->min_rate; in clk_core_get_boundaries()
678 *max_rate = core->max_rate; in clk_core_get_boundaries()
680 hlist_for_each_entry(clk_user, &core->clks, clks_node) in clk_core_get_boundaries()
683 hlist_for_each_entry(clk_user, &core->clks, clks_node) in clk_core_get_boundaries()
699 clk_core_get_boundaries(hw->core, min_rate, max_rate); in clk_hw_get_rate_range()
703 static bool clk_core_check_boundaries(struct clk_core *core, in clk_core_check_boundaries() argument
711 if (min_rate > core->max_rate || max_rate < core->min_rate) in clk_core_check_boundaries()
714 hlist_for_each_entry(user, &core->clks, clks_node) in clk_core_check_boundaries()
724 hw->core->min_rate = min_rate; in clk_hw_set_rate_range()
725 hw->core->max_rate = max_rate; in clk_hw_set_rate_range()
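clk_hw_set_rate_range() and the boundary walk above let a provider pin hard hardware limits right after registration; later rate requests, including consumer clk_set_rate_range() calls, are validated against them. A hedged sketch, assuming it runs in a driver probe() with dev and ret in scope and the foo_clk wrapper from earlier:

    ret = devm_clk_hw_register(dev, &foo->hw);
    if (ret)
            return ret;

    /* hypothetical VCO limits; subsequent rate requests are bounded by this window */
    clk_hw_set_rate_range(&foo->hw, 200000000UL, 1600000000UL);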
756 static void clk_core_rate_unprotect(struct clk_core *core) in clk_core_rate_unprotect() argument
760 if (!core) in clk_core_rate_unprotect()
763 if (WARN(core->protect_count == 0, in clk_core_rate_unprotect()
764 "%s already unprotected\n", core->name)) in clk_core_rate_unprotect()
767 if (--core->protect_count > 0) in clk_core_rate_unprotect()
770 clk_core_rate_unprotect(core->parent); in clk_core_rate_unprotect()
773 static int clk_core_rate_nuke_protect(struct clk_core *core) in clk_core_rate_nuke_protect() argument
779 if (!core) in clk_core_rate_nuke_protect()
782 if (core->protect_count == 0) in clk_core_rate_nuke_protect()
785 ret = core->protect_count; in clk_core_rate_nuke_protect()
786 core->protect_count = 1; in clk_core_rate_nuke_protect()
787 clk_core_rate_unprotect(core); in clk_core_rate_nuke_protect()
824 clk_core_rate_unprotect(clk->core); in clk_rate_exclusive_put()
831 static void clk_core_rate_protect(struct clk_core *core) in clk_core_rate_protect() argument
835 if (!core) in clk_core_rate_protect()
838 if (core->protect_count == 0) in clk_core_rate_protect()
839 clk_core_rate_protect(core->parent); in clk_core_rate_protect()
841 core->protect_count++; in clk_core_rate_protect()
844 static void clk_core_rate_restore_protect(struct clk_core *core, int count) in clk_core_rate_restore_protect() argument
848 if (!core) in clk_core_rate_restore_protect()
854 clk_core_rate_protect(core); in clk_core_rate_restore_protect()
855 core->protect_count = count; in clk_core_rate_restore_protect()
882 clk_core_rate_protect(clk->core); in clk_rate_exclusive_get()
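clk_rate_exclusive_get()/clk_rate_exclusive_put() drive the protect_count walk shown above: while a consumer holds exclusivity, other consumers get -EBUSY on rate or parent changes, but the holder itself may still reprogram the clk. A hedged consumer sketch (clk and ret assumed in scope):

    ret = clk_rate_exclusive_get(clk);
    if (ret)
            return ret;

    ret = clk_set_rate(clk, 100000000);     /* still allowed for the holder */

    /* ... time-critical section that relies on the exact rate ... */

    clk_rate_exclusive_put(clk);

clk_set_rate_exclusive() bundles the rate change and the exclusivity claim into one call when that is more convenient.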
890 static void clk_core_unprepare(struct clk_core *core) in clk_core_unprepare() argument
894 if (!core) in clk_core_unprepare()
897 if (WARN(core->prepare_count == 0, in clk_core_unprepare()
898 "%s already unprepared\n", core->name)) in clk_core_unprepare()
901 if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL, in clk_core_unprepare()
902 "Unpreparing critical %s\n", core->name)) in clk_core_unprepare()
905 if (core->flags & CLK_SET_RATE_GATE) in clk_core_unprepare()
906 clk_core_rate_unprotect(core); in clk_core_unprepare()
908 if (--core->prepare_count > 0) in clk_core_unprepare()
911 WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name); in clk_core_unprepare()
913 trace_clk_unprepare(core); in clk_core_unprepare()
915 if (core->ops->unprepare) in clk_core_unprepare()
916 core->ops->unprepare(core->hw); in clk_core_unprepare()
918 trace_clk_unprepare_complete(core); in clk_core_unprepare()
919 clk_core_unprepare(core->parent); in clk_core_unprepare()
920 clk_pm_runtime_put(core); in clk_core_unprepare()
923 static void clk_core_unprepare_lock(struct clk_core *core) in clk_core_unprepare_lock() argument
926 clk_core_unprepare(core); in clk_core_unprepare_lock()
946 clk_core_unprepare_lock(clk->core); in clk_unprepare()
950 static int clk_core_prepare(struct clk_core *core) in clk_core_prepare() argument
956 if (!core) in clk_core_prepare()
959 if (core->prepare_count == 0) { in clk_core_prepare()
960 ret = clk_pm_runtime_get(core); in clk_core_prepare()
964 ret = clk_core_prepare(core->parent); in clk_core_prepare()
968 trace_clk_prepare(core); in clk_core_prepare()
970 if (core->ops->prepare) in clk_core_prepare()
971 ret = core->ops->prepare(core->hw); in clk_core_prepare()
973 trace_clk_prepare_complete(core); in clk_core_prepare()
979 core->prepare_count++; in clk_core_prepare()
988 if (core->flags & CLK_SET_RATE_GATE) in clk_core_prepare()
989 clk_core_rate_protect(core); in clk_core_prepare()
993 clk_core_unprepare(core->parent); in clk_core_prepare()
995 clk_pm_runtime_put(core); in clk_core_prepare()
999 static int clk_core_prepare_lock(struct clk_core *core) in clk_core_prepare_lock() argument
1004 ret = clk_core_prepare(core); in clk_core_prepare_lock()
1027 return clk_core_prepare_lock(clk->core); in clk_prepare()
1031 static void clk_core_disable(struct clk_core *core) in clk_core_disable() argument
1035 if (!core) in clk_core_disable()
1038 if (WARN(core->enable_count == 0, "%s already disabled\n", core->name)) in clk_core_disable()
1041 if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL, in clk_core_disable()
1042 "Disabling critical %s\n", core->name)) in clk_core_disable()
1045 if (--core->enable_count > 0) in clk_core_disable()
1048 trace_clk_disable_rcuidle(core); in clk_core_disable()
1050 if (core->ops->disable) in clk_core_disable()
1051 core->ops->disable(core->hw); in clk_core_disable()
1053 trace_clk_disable_complete_rcuidle(core); in clk_core_disable()
1055 clk_core_disable(core->parent); in clk_core_disable()
1058 static void clk_core_disable_lock(struct clk_core *core) in clk_core_disable_lock() argument
1063 clk_core_disable(core); in clk_core_disable_lock()
1084 clk_core_disable_lock(clk->core); in clk_disable()
1088 static int clk_core_enable(struct clk_core *core) in clk_core_enable() argument
1094 if (!core) in clk_core_enable()
1097 if (WARN(core->prepare_count == 0, in clk_core_enable()
1098 "Enabling unprepared %s\n", core->name)) in clk_core_enable()
1101 if (core->enable_count == 0) { in clk_core_enable()
1102 ret = clk_core_enable(core->parent); in clk_core_enable()
1107 trace_clk_enable_rcuidle(core); in clk_core_enable()
1109 if (core->ops->enable) in clk_core_enable()
1110 ret = core->ops->enable(core->hw); in clk_core_enable()
1112 trace_clk_enable_complete_rcuidle(core); in clk_core_enable()
1115 clk_core_disable(core->parent); in clk_core_enable()
1120 core->enable_count++; in clk_core_enable()
1124 static int clk_core_enable_lock(struct clk_core *core) in clk_core_enable_lock() argument
1130 ret = clk_core_enable(core); in clk_core_enable_lock()
1148 struct clk_core *core = hw->core; in clk_gate_restore_context() local
1150 if (core->enable_count) in clk_gate_restore_context()
1151 core->ops->enable(hw); in clk_gate_restore_context()
1153 core->ops->disable(hw); in clk_gate_restore_context()
1157 static int clk_core_save_context(struct clk_core *core) in clk_core_save_context() argument
1162 hlist_for_each_entry(child, &core->children, child_node) { in clk_core_save_context()
1168 if (core->ops && core->ops->save_context) in clk_core_save_context()
1169 ret = core->ops->save_context(core->hw); in clk_core_save_context()
1174 static void clk_core_restore_context(struct clk_core *core) in clk_core_restore_context() argument
1178 if (core->ops && core->ops->restore_context) in clk_core_restore_context()
1179 core->ops->restore_context(core->hw); in clk_core_restore_context()
1181 hlist_for_each_entry(child, &core->children, child_node) in clk_core_restore_context()
1221 struct clk_core *core; in clk_restore_context() local
1223 hlist_for_each_entry(core, &clk_root_list, child_node) in clk_restore_context()
1224 clk_core_restore_context(core); in clk_restore_context()
1226 hlist_for_each_entry(core, &clk_orphan_list, child_node) in clk_restore_context()
1227 clk_core_restore_context(core); in clk_restore_context()
1249 return clk_core_enable_lock(clk->core); in clk_enable()
1270 return clk && !(clk->core->ops->enable && clk->core->ops->disable); in clk_is_enabled_when_prepared()
1274 static int clk_core_prepare_enable(struct clk_core *core) in clk_core_prepare_enable() argument
1278 ret = clk_core_prepare_lock(core); in clk_core_prepare_enable()
1282 ret = clk_core_enable_lock(core); in clk_core_prepare_enable()
1284 clk_core_unprepare_lock(core); in clk_core_prepare_enable()
1289 static void clk_core_disable_unprepare(struct clk_core *core) in clk_core_disable_unprepare() argument
1291 clk_core_disable_lock(core); in clk_core_disable_unprepare()
1292 clk_core_unprepare_lock(core); in clk_core_disable_unprepare()
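clk_core_prepare_enable()/clk_core_disable_unprepare() are the in-framework counterparts of the pairing every consumer uses: clk_prepare() may sleep while clk_enable() is callable from atomic context, and the combined helpers are the usual probe-time pattern. A hedged consumer sketch (inside a driver probe(); "bus" is an illustrative con_id):

    struct clk *clk;
    int ret;

    clk = devm_clk_get(dev, "bus");
    if (IS_ERR(clk))
            return PTR_ERR(clk);

    ret = clk_prepare_enable(clk);          /* bumps prepare_count, then enable_count */
    if (ret)
            return ret;

    /* ... use the peripheral ... */

    clk_disable_unprepare(clk);             /* reverse order on teardown */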
1295 static void __init clk_unprepare_unused_subtree(struct clk_core *core) in clk_unprepare_unused_subtree() argument
1301 hlist_for_each_entry(child, &core->children, child_node) in clk_unprepare_unused_subtree()
1304 if (core->prepare_count) in clk_unprepare_unused_subtree()
1307 if (core->flags & CLK_IGNORE_UNUSED) in clk_unprepare_unused_subtree()
1310 if (clk_pm_runtime_get(core)) in clk_unprepare_unused_subtree()
1313 if (clk_core_is_prepared(core)) { in clk_unprepare_unused_subtree()
1314 trace_clk_unprepare(core); in clk_unprepare_unused_subtree()
1315 if (core->ops->unprepare_unused) in clk_unprepare_unused_subtree()
1316 core->ops->unprepare_unused(core->hw); in clk_unprepare_unused_subtree()
1317 else if (core->ops->unprepare) in clk_unprepare_unused_subtree()
1318 core->ops->unprepare(core->hw); in clk_unprepare_unused_subtree()
1319 trace_clk_unprepare_complete(core); in clk_unprepare_unused_subtree()
1322 clk_pm_runtime_put(core); in clk_unprepare_unused_subtree()
1325 static void __init clk_disable_unused_subtree(struct clk_core *core) in clk_disable_unused_subtree() argument
1332 hlist_for_each_entry(child, &core->children, child_node) in clk_disable_unused_subtree()
1335 if (core->flags & CLK_OPS_PARENT_ENABLE) in clk_disable_unused_subtree()
1336 clk_core_prepare_enable(core->parent); in clk_disable_unused_subtree()
1338 if (clk_pm_runtime_get(core)) in clk_disable_unused_subtree()
1343 if (core->enable_count) in clk_disable_unused_subtree()
1346 if (core->flags & CLK_IGNORE_UNUSED) in clk_disable_unused_subtree()
1354 if (clk_core_is_enabled(core)) { in clk_disable_unused_subtree()
1355 trace_clk_disable(core); in clk_disable_unused_subtree()
1356 if (core->ops->disable_unused) in clk_disable_unused_subtree()
1357 core->ops->disable_unused(core->hw); in clk_disable_unused_subtree()
1358 else if (core->ops->disable) in clk_disable_unused_subtree()
1359 core->ops->disable(core->hw); in clk_disable_unused_subtree()
1360 trace_clk_disable_complete(core); in clk_disable_unused_subtree()
1365 clk_pm_runtime_put(core); in clk_disable_unused_subtree()
1367 if (core->flags & CLK_OPS_PARENT_ENABLE) in clk_disable_unused_subtree()
1368 clk_core_disable_unprepare(core->parent); in clk_disable_unused_subtree()
1381 struct clk_core *core; in clk_disable_unused() local
1390 hlist_for_each_entry(core, &clk_root_list, child_node) in clk_disable_unused()
1391 clk_disable_unused_subtree(core); in clk_disable_unused()
1393 hlist_for_each_entry(core, &clk_orphan_list, child_node) in clk_disable_unused()
1394 clk_disable_unused_subtree(core); in clk_disable_unused()
1396 hlist_for_each_entry(core, &clk_root_list, child_node) in clk_disable_unused()
1397 clk_unprepare_unused_subtree(core); in clk_disable_unused()
1399 hlist_for_each_entry(core, &clk_orphan_list, child_node) in clk_disable_unused()
1400 clk_unprepare_unused_subtree(core); in clk_disable_unused()
1408 static int clk_core_determine_round_nolock(struct clk_core *core, in clk_core_determine_round_nolock() argument
1415 if (!core) in clk_core_determine_round_nolock()
1428 __func__, core->name); in clk_core_determine_round_nolock()
1433 * At this point, core protection will be disabled in clk_core_determine_round_nolock()
1438 if (clk_core_rate_is_protected(core)) { in clk_core_determine_round_nolock()
1439 req->rate = core->rate; in clk_core_determine_round_nolock()
1440 } else if (core->ops->determine_rate) { in clk_core_determine_round_nolock()
1441 return core->ops->determine_rate(core->hw, req); in clk_core_determine_round_nolock()
1442 } else if (core->ops->round_rate) { in clk_core_determine_round_nolock()
1443 rate = core->ops->round_rate(core->hw, req->rate, in clk_core_determine_round_nolock()
1456 static void clk_core_init_rate_req(struct clk_core * const core, in clk_core_init_rate_req() argument
1468 if (!core) in clk_core_init_rate_req()
1472 clk_core_get_boundaries(core, &req->min_rate, &req->max_rate); in clk_core_init_rate_req()
1474 parent = core->parent; in clk_core_init_rate_req()
1500 clk_core_init_rate_req(hw->core, req, rate); in clk_hw_init_rate_request()
1524 clk_core_forward_rate_req(hw->core, old_req, in clk_hw_forward_rate_request()
1525 parent->core, req, in clk_hw_forward_rate_request()
1529 static bool clk_core_can_round(struct clk_core * const core) in clk_core_can_round() argument
1531 return core->ops->determine_rate || core->ops->round_rate; in clk_core_can_round()
1534 static int clk_core_round_rate_nolock(struct clk_core *core, in clk_core_round_rate_nolock() argument
1541 if (!core) { in clk_core_round_rate_nolock()
1546 if (clk_core_can_round(core)) in clk_core_round_rate_nolock()
1547 return clk_core_determine_round_nolock(core, req); in clk_core_round_rate_nolock()
1549 if (core->flags & CLK_SET_RATE_PARENT) { in clk_core_round_rate_nolock()
1552 clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate); in clk_core_round_rate_nolock()
1553 ret = clk_core_round_rate_nolock(core->parent, &parent_req); in clk_core_round_rate_nolock()
1563 req->rate = core->rate; in clk_core_round_rate_nolock()
1581 return clk_core_round_rate_nolock(hw->core, req); in __clk_determine_rate()
1605 clk_core_init_rate_req(hw->core, &req, rate); in clk_hw_round_rate()
1607 ret = clk_core_round_rate_nolock(hw->core, &req); in clk_hw_round_rate()
1635 clk_core_rate_unprotect(clk->core); in clk_round_rate()
1637 clk_core_init_rate_req(clk->core, &req, rate); in clk_round_rate()
1639 ret = clk_core_round_rate_nolock(clk->core, &req); in clk_round_rate()
1642 clk_core_rate_protect(clk->core); in clk_round_rate()
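clk_round_rate() walks the same rounding machinery as clk_set_rate() but commits nothing, so a consumer can learn what would actually be programmed before deciding. A hedged sketch (clk and ret in scope; the target rate is illustrative):

    long rounded;

    rounded = clk_round_rate(clk, 48000 * 512);
    if (rounded <= 0)
            return rounded ? rounded : -EINVAL;

    ret = clk_set_rate(clk, rounded);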
1655 * @core: clk that is changing rate
1667 static int __clk_notify(struct clk_core *core, unsigned long msg, in __clk_notify() argument
1678 if (cn->clk->core == core) { in __clk_notify()
1692 * @core: first clk in the subtree
1699 static void __clk_recalc_accuracies(struct clk_core *core) in __clk_recalc_accuracies() argument
1706 if (core->parent) in __clk_recalc_accuracies()
1707 parent_accuracy = core->parent->accuracy; in __clk_recalc_accuracies()
1709 if (core->ops->recalc_accuracy) in __clk_recalc_accuracies()
1710 core->accuracy = core->ops->recalc_accuracy(core->hw, in __clk_recalc_accuracies()
1713 core->accuracy = parent_accuracy; in __clk_recalc_accuracies()
1715 hlist_for_each_entry(child, &core->children, child_node) in __clk_recalc_accuracies()
1719 static long clk_core_get_accuracy_recalc(struct clk_core *core) in clk_core_get_accuracy_recalc() argument
1721 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE)) in clk_core_get_accuracy_recalc()
1722 __clk_recalc_accuracies(core); in clk_core_get_accuracy_recalc()
1724 return clk_core_get_accuracy_no_lock(core); in clk_core_get_accuracy_recalc()
1744 accuracy = clk_core_get_accuracy_recalc(clk->core); in clk_get_accuracy()
1751 static unsigned long clk_recalc(struct clk_core *core, in clk_recalc() argument
1756 if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) { in clk_recalc()
1757 rate = core->ops->recalc_rate(core->hw, parent_rate); in clk_recalc()
1758 clk_pm_runtime_put(core); in clk_recalc()
1765 * @core: first clk in the subtree
1776 static void __clk_recalc_rates(struct clk_core *core, bool update_req, in __clk_recalc_rates() argument
1785 old_rate = core->rate; in __clk_recalc_rates()
1787 if (core->parent) in __clk_recalc_rates()
1788 parent_rate = core->parent->rate; in __clk_recalc_rates()
1790 core->rate = clk_recalc(core, parent_rate); in __clk_recalc_rates()
1792 core->req_rate = core->rate; in __clk_recalc_rates()
1798 if (core->notifier_count && msg) in __clk_recalc_rates()
1799 __clk_notify(core, msg, old_rate, core->rate); in __clk_recalc_rates()
1801 hlist_for_each_entry(child, &core->children, child_node) in __clk_recalc_rates()
1805 static unsigned long clk_core_get_rate_recalc(struct clk_core *core) in clk_core_get_rate_recalc() argument
1807 if (core && (core->flags & CLK_GET_RATE_NOCACHE)) in clk_core_get_rate_recalc()
1808 __clk_recalc_rates(core, false, 0); in clk_core_get_rate_recalc()
1810 return clk_core_get_rate_nolock(core); in clk_core_get_rate_recalc()
1830 rate = clk_core_get_rate_recalc(clk->core); in clk_get_rate()
1837 static int clk_fetch_parent_index(struct clk_core *core, in clk_fetch_parent_index() argument
1845 for (i = 0; i < core->num_parents; i++) { in clk_fetch_parent_index()
1847 if (core->parents[i].core == parent) in clk_fetch_parent_index()
1851 if (core->parents[i].core) in clk_fetch_parent_index()
1854 /* Maybe core hasn't been cached but the hw is all we know? */ in clk_fetch_parent_index()
1855 if (core->parents[i].hw) { in clk_fetch_parent_index()
1856 if (core->parents[i].hw == parent->hw) in clk_fetch_parent_index()
1864 if (parent == clk_core_get(core, i)) in clk_fetch_parent_index()
1868 if (core->parents[i].name && in clk_fetch_parent_index()
1869 !strcmp(parent->name, core->parents[i].name)) in clk_fetch_parent_index()
1873 if (i == core->num_parents) in clk_fetch_parent_index()
1876 core->parents[i].core = parent; in clk_fetch_parent_index()
1894 return clk_fetch_parent_index(hw->core, parent->core); in clk_hw_get_parent_index()
1899 * Update the orphan status of @core and all its children.
1901 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan) in clk_core_update_orphan_status() argument
1905 core->orphan = is_orphan; in clk_core_update_orphan_status()
1907 hlist_for_each_entry(child, &core->children, child_node) in clk_core_update_orphan_status()
1911 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent) in clk_reparent() argument
1913 bool was_orphan = core->orphan; in clk_reparent()
1915 hlist_del(&core->child_node); in clk_reparent()
1921 if (new_parent->new_child == core) in clk_reparent()
1924 hlist_add_head(&core->child_node, &new_parent->children); in clk_reparent()
1927 clk_core_update_orphan_status(core, becomes_orphan); in clk_reparent()
1929 hlist_add_head(&core->child_node, &clk_orphan_list); in clk_reparent()
1931 clk_core_update_orphan_status(core, true); in clk_reparent()
1934 core->parent = new_parent; in clk_reparent()
1937 static struct clk_core *__clk_set_parent_before(struct clk_core *core, in __clk_set_parent_before() argument
1941 struct clk_core *old_parent = core->parent; in __clk_set_parent_before()
1964 if (core->flags & CLK_OPS_PARENT_ENABLE) { in __clk_set_parent_before()
1970 if (core->prepare_count) { in __clk_set_parent_before()
1972 clk_core_enable_lock(core); in __clk_set_parent_before()
1977 clk_reparent(core, parent); in __clk_set_parent_before()
1983 static void __clk_set_parent_after(struct clk_core *core, in __clk_set_parent_after() argument
1991 if (core->prepare_count) { in __clk_set_parent_after()
1992 clk_core_disable_lock(core); in __clk_set_parent_after()
1997 if (core->flags & CLK_OPS_PARENT_ENABLE) { in __clk_set_parent_after()
2003 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, in __clk_set_parent() argument
2010 old_parent = __clk_set_parent_before(core, parent); in __clk_set_parent()
2012 trace_clk_set_parent(core, parent); in __clk_set_parent()
2015 if (parent && core->ops->set_parent) in __clk_set_parent()
2016 ret = core->ops->set_parent(core->hw, p_index); in __clk_set_parent()
2018 trace_clk_set_parent_complete(core, parent); in __clk_set_parent()
2022 clk_reparent(core, old_parent); in __clk_set_parent()
2025 __clk_set_parent_after(core, old_parent, parent); in __clk_set_parent()
2030 __clk_set_parent_after(core, parent, old_parent); in __clk_set_parent()
2037 * @core: first clk in the subtree
2049 static int __clk_speculate_rates(struct clk_core *core, in __clk_speculate_rates() argument
2058 new_rate = clk_recalc(core, parent_rate); in __clk_speculate_rates()
2061 if (core->notifier_count) in __clk_speculate_rates()
2062 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate); in __clk_speculate_rates()
2066 __func__, core->name, ret); in __clk_speculate_rates()
2070 hlist_for_each_entry(child, &core->children, child_node) { in __clk_speculate_rates()
2080 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, in clk_calc_subtree() argument
2085 core->new_rate = new_rate; in clk_calc_subtree()
2086 core->new_parent = new_parent; in clk_calc_subtree()
2087 core->new_parent_index = p_index; in clk_calc_subtree()
2089 core->new_child = NULL; in clk_calc_subtree()
2090 if (new_parent && new_parent != core->parent) in clk_calc_subtree()
2091 new_parent->new_child = core; in clk_calc_subtree()
2093 hlist_for_each_entry(child, &core->children, child_node) { in clk_calc_subtree()
2103 static struct clk_core *clk_calc_new_rates(struct clk_core *core, in clk_calc_new_rates() argument
2106 struct clk_core *top = core; in clk_calc_new_rates()
2116 if (IS_ERR_OR_NULL(core)) in clk_calc_new_rates()
2120 parent = old_parent = core->parent; in clk_calc_new_rates()
2124 clk_core_get_boundaries(core, &min_rate, &max_rate); in clk_calc_new_rates()
2127 if (clk_core_can_round(core)) { in clk_calc_new_rates()
2130 clk_core_init_rate_req(core, &req, rate); in clk_calc_new_rates()
2132 ret = clk_core_determine_round_nolock(core, &req); in clk_calc_new_rates()
2138 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL; in clk_calc_new_rates()
2142 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { in clk_calc_new_rates()
2144 core->new_rate = core->rate; in clk_calc_new_rates()
2155 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { in clk_calc_new_rates()
2157 __func__, core->name); in clk_calc_new_rates()
2162 if (parent && core->num_parents > 1) { in clk_calc_new_rates()
2163 p_index = clk_fetch_parent_index(core, parent); in clk_calc_new_rates()
2166 __func__, parent->name, core->name); in clk_calc_new_rates()
2171 if ((core->flags & CLK_SET_RATE_PARENT) && parent && in clk_calc_new_rates()
2176 clk_calc_subtree(core, new_rate, parent, p_index); in clk_calc_new_rates()
2186 static struct clk_core *clk_propagate_rate_change(struct clk_core *core, in clk_propagate_rate_change() argument
2192 if (core->rate == core->new_rate) in clk_propagate_rate_change()
2195 if (core->notifier_count) { in clk_propagate_rate_change()
2196 ret = __clk_notify(core, event, core->rate, core->new_rate); in clk_propagate_rate_change()
2198 fail_clk = core; in clk_propagate_rate_change()
2201 hlist_for_each_entry(child, &core->children, child_node) { in clk_propagate_rate_change()
2203 if (child->new_parent && child->new_parent != core) in clk_propagate_rate_change()
2210 /* handle the new child who might not be in core->children yet */ in clk_propagate_rate_change()
2211 if (core->new_child) { in clk_propagate_rate_change()
2212 tmp_clk = clk_propagate_rate_change(core->new_child, event); in clk_propagate_rate_change()
2224 static void clk_change_rate(struct clk_core *core) in clk_change_rate() argument
2234 old_rate = core->rate; in clk_change_rate()
2236 if (core->new_parent) { in clk_change_rate()
2237 parent = core->new_parent; in clk_change_rate()
2238 best_parent_rate = core->new_parent->rate; in clk_change_rate()
2239 } else if (core->parent) { in clk_change_rate()
2240 parent = core->parent; in clk_change_rate()
2241 best_parent_rate = core->parent->rate; in clk_change_rate()
2244 if (clk_pm_runtime_get(core)) in clk_change_rate()
2247 if (core->flags & CLK_SET_RATE_UNGATE) { in clk_change_rate()
2248 clk_core_prepare(core); in clk_change_rate()
2249 clk_core_enable_lock(core); in clk_change_rate()
2252 if (core->new_parent && core->new_parent != core->parent) { in clk_change_rate()
2253 old_parent = __clk_set_parent_before(core, core->new_parent); in clk_change_rate()
2254 trace_clk_set_parent(core, core->new_parent); in clk_change_rate()
2256 if (core->ops->set_rate_and_parent) { in clk_change_rate()
2258 core->ops->set_rate_and_parent(core->hw, core->new_rate, in clk_change_rate()
2260 core->new_parent_index); in clk_change_rate()
2261 } else if (core->ops->set_parent) { in clk_change_rate()
2262 core->ops->set_parent(core->hw, core->new_parent_index); in clk_change_rate()
2265 trace_clk_set_parent_complete(core, core->new_parent); in clk_change_rate()
2266 __clk_set_parent_after(core, core->new_parent, old_parent); in clk_change_rate()
2269 if (core->flags & CLK_OPS_PARENT_ENABLE) in clk_change_rate()
2272 trace_clk_set_rate(core, core->new_rate); in clk_change_rate()
2274 if (!skip_set_rate && core->ops->set_rate) in clk_change_rate()
2275 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); in clk_change_rate()
2277 trace_clk_set_rate_complete(core, core->new_rate); in clk_change_rate()
2279 core->rate = clk_recalc(core, best_parent_rate); in clk_change_rate()
2281 if (core->flags & CLK_SET_RATE_UNGATE) { in clk_change_rate()
2282 clk_core_disable_lock(core); in clk_change_rate()
2283 clk_core_unprepare(core); in clk_change_rate()
2286 if (core->flags & CLK_OPS_PARENT_ENABLE) in clk_change_rate()
2289 if (core->notifier_count && old_rate != core->rate) in clk_change_rate()
2290 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); in clk_change_rate()
2292 if (core->flags & CLK_RECALC_NEW_RATES) in clk_change_rate()
2293 (void)clk_calc_new_rates(core, core->new_rate); in clk_change_rate()
2299 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { in clk_change_rate()
2301 if (child->new_parent && child->new_parent != core) in clk_change_rate()
2306 /* handle the new child who might not be in core->children yet */ in clk_change_rate()
2307 if (core->new_child) in clk_change_rate()
2308 clk_change_rate(core->new_child); in clk_change_rate()
2310 clk_pm_runtime_put(core); in clk_change_rate()
2313 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, in clk_core_req_round_rate_nolock() argument
2321 if (!core) in clk_core_req_round_rate_nolock()
2325 cnt = clk_core_rate_nuke_protect(core); in clk_core_req_round_rate_nolock()
2329 clk_core_init_rate_req(core, &req, req_rate); in clk_core_req_round_rate_nolock()
2331 ret = clk_core_round_rate_nolock(core, &req); in clk_core_req_round_rate_nolock()
2334 clk_core_rate_restore_protect(core, cnt); in clk_core_req_round_rate_nolock()
2339 static int clk_core_set_rate_nolock(struct clk_core *core, in clk_core_set_rate_nolock() argument
2346 if (!core) in clk_core_set_rate_nolock()
2349 rate = clk_core_req_round_rate_nolock(core, req_rate); in clk_core_set_rate_nolock()
2352 if (rate == clk_core_get_rate_nolock(core)) in clk_core_set_rate_nolock()
2356 if (clk_core_rate_is_protected(core)) in clk_core_set_rate_nolock()
2360 top = clk_calc_new_rates(core, req_rate); in clk_core_set_rate_nolock()
2364 ret = clk_pm_runtime_get(core); in clk_core_set_rate_nolock()
2381 core->req_rate = req_rate; in clk_core_set_rate_nolock()
2383 clk_pm_runtime_put(core); in clk_core_set_rate_nolock()
2420 clk_core_rate_unprotect(clk->core); in clk_set_rate()
2422 ret = clk_core_set_rate_nolock(clk->core, rate); in clk_set_rate()
2425 clk_core_rate_protect(clk->core); in clk_set_rate()
2468 ret = clk_core_set_rate_nolock(clk->core, rate); in clk_set_rate_exclusive()
2470 clk_core_rate_protect(clk->core); in clk_set_rate_exclusive()
2492 trace_clk_set_rate_range(clk->core, min, max); in clk_set_rate_range_nolock()
2496 __func__, clk->core->name, clk->dev_id, clk->con_id, in clk_set_rate_range_nolock()
2502 clk_core_rate_unprotect(clk->core); in clk_set_rate_range_nolock()
2510 if (!clk_core_check_boundaries(clk->core, min, max)) { in clk_set_rate_range_nolock()
2515 rate = clk->core->req_rate; in clk_set_rate_range_nolock()
2516 if (clk->core->flags & CLK_GET_RATE_NOCACHE) in clk_set_rate_range_nolock()
2517 rate = clk_core_get_rate_recalc(clk->core); in clk_set_rate_range_nolock()
2537 ret = clk_core_set_rate_nolock(clk->core, rate); in clk_set_rate_range_nolock()
2546 clk_core_rate_protect(clk->core); in clk_set_rate_range_nolock()
2588 trace_clk_set_min_rate(clk->core, rate); in clk_set_min_rate()
2606 trace_clk_set_max_rate(clk->core, rate); in clk_set_max_rate()
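clk_set_min_rate()/clk_set_max_rate() record per-consumer boundaries that clk_core_get_boundaries() aggregates with the provider limits seen earlier; clk_set_rate_range() sets both ends in one call. A hedged consumer sketch:

    /* keep the clock somewhere inside a workable window instead of one exact rate */
    ret = clk_set_rate_range(clk, 100000000, 200000000);
    if (ret)
            return ret;

    /* or tighten a single bound later on */
    ret = clk_set_min_rate(clk, 150000000);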
2627 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; in clk_get_parent()
2634 static struct clk_core *__clk_init_parent(struct clk_core *core) in __clk_init_parent() argument
2638 if (core->num_parents > 1 && core->ops->get_parent) in __clk_init_parent()
2639 index = core->ops->get_parent(core->hw); in __clk_init_parent()
2641 return clk_core_get_parent_by_index(core, index); in __clk_init_parent()
2644 static void clk_core_reparent(struct clk_core *core, in clk_core_reparent() argument
2647 clk_reparent(core, new_parent); in clk_core_reparent()
2648 __clk_recalc_accuracies(core); in clk_core_reparent()
2649 __clk_recalc_rates(core, true, POST_RATE_CHANGE); in clk_core_reparent()
2657 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core); in clk_hw_reparent()
2676 return clk_core_has_parent(clk->core, parent->core); in clk_has_parent()
2680 static int clk_core_set_parent_nolock(struct clk_core *core, in clk_core_set_parent_nolock() argument
2689 if (!core) in clk_core_set_parent_nolock()
2692 if (core->parent == parent) in clk_core_set_parent_nolock()
2696 if (core->num_parents > 1 && !core->ops->set_parent) in clk_core_set_parent_nolock()
2700 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) in clk_core_set_parent_nolock()
2703 if (clk_core_rate_is_protected(core)) in clk_core_set_parent_nolock()
2708 p_index = clk_fetch_parent_index(core, parent); in clk_core_set_parent_nolock()
2711 __func__, parent->name, core->name); in clk_core_set_parent_nolock()
2717 ret = clk_pm_runtime_get(core); in clk_core_set_parent_nolock()
2722 ret = __clk_speculate_rates(core, p_rate); in clk_core_set_parent_nolock()
2729 ret = __clk_set_parent(core, parent, p_index); in clk_core_set_parent_nolock()
2733 __clk_recalc_rates(core, true, ABORT_RATE_CHANGE); in clk_core_set_parent_nolock()
2735 __clk_recalc_rates(core, true, POST_RATE_CHANGE); in clk_core_set_parent_nolock()
2736 __clk_recalc_accuracies(core); in clk_core_set_parent_nolock()
2740 clk_pm_runtime_put(core); in clk_core_set_parent_nolock()
2747 return clk_core_set_parent_nolock(hw->core, parent->core); in clk_hw_set_parent()
2778 clk_core_rate_unprotect(clk->core); in clk_set_parent()
2780 ret = clk_core_set_parent_nolock(clk->core, in clk_set_parent()
2781 parent ? parent->core : NULL); in clk_set_parent()
2784 clk_core_rate_protect(clk->core); in clk_set_parent()
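clk_set_parent() funnels into clk_core_set_parent_nolock() above, which rejects reparenting while the clk is rate-protected or while it is prepared and carries CLK_SET_PARENT_GATE. A hedged consumer sketch ("pll_ref" is an illustrative con_id):

    struct clk *parent;

    parent = devm_clk_get(dev, "pll_ref");
    if (IS_ERR(parent))
            return PTR_ERR(parent);

    if (!clk_has_parent(clk, parent))
            return -EINVAL;

    ret = clk_set_parent(clk, parent);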
2792 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) in clk_core_set_phase_nolock() argument
2798 if (!core) in clk_core_set_phase_nolock()
2801 if (clk_core_rate_is_protected(core)) in clk_core_set_phase_nolock()
2804 trace_clk_set_phase(core, degrees); in clk_core_set_phase_nolock()
2806 if (core->ops->set_phase) { in clk_core_set_phase_nolock()
2807 ret = core->ops->set_phase(core->hw, degrees); in clk_core_set_phase_nolock()
2809 core->phase = degrees; in clk_core_set_phase_nolock()
2812 trace_clk_set_phase_complete(core, degrees); in clk_core_set_phase_nolock()
2852 clk_core_rate_unprotect(clk->core); in clk_set_phase()
2854 ret = clk_core_set_phase_nolock(clk->core, degrees); in clk_set_phase()
2857 clk_core_rate_protect(clk->core); in clk_set_phase()
2865 static int clk_core_get_phase(struct clk_core *core) in clk_core_get_phase() argument
2870 if (!core->ops->get_phase) in clk_core_get_phase()
2874 ret = core->ops->get_phase(core->hw); in clk_core_get_phase()
2876 core->phase = ret; in clk_core_get_phase()
2896 ret = clk_core_get_phase(clk->core); in clk_get_phase()
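clk_set_phase() fails with -EINVAL unless the provider implements .set_phase, and clk_get_phase() refreshes the cached core->phase through .get_phase (when that op exists) on every call. A hedged consumer sketch:

    /* shift a sampling clock a quarter period relative to its parent */
    ret = clk_set_phase(sample_clk, 90);
    if (ret)
            return ret;

    if (clk_get_phase(sample_clk) != 90)
            dev_warn(dev, "phase request not applied exactly\n");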
2903 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core) in clk_core_reset_duty_cycle_nolock() argument
2906 core->duty.num = 1; in clk_core_reset_duty_cycle_nolock()
2907 core->duty.den = 2; in clk_core_reset_duty_cycle_nolock()
2910 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
2912 static int clk_core_update_duty_cycle_nolock(struct clk_core *core) in clk_core_update_duty_cycle_nolock() argument
2914 struct clk_duty *duty = &core->duty; in clk_core_update_duty_cycle_nolock()
2917 if (!core->ops->get_duty_cycle) in clk_core_update_duty_cycle_nolock()
2918 return clk_core_update_duty_cycle_parent_nolock(core); in clk_core_update_duty_cycle_nolock()
2920 ret = core->ops->get_duty_cycle(core->hw, duty); in clk_core_update_duty_cycle_nolock()
2933 clk_core_reset_duty_cycle_nolock(core); in clk_core_update_duty_cycle_nolock()
2937 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core) in clk_core_update_duty_cycle_parent_nolock() argument
2941 if (core->parent && in clk_core_update_duty_cycle_parent_nolock()
2942 core->flags & CLK_DUTY_CYCLE_PARENT) { in clk_core_update_duty_cycle_parent_nolock()
2943 ret = clk_core_update_duty_cycle_nolock(core->parent); in clk_core_update_duty_cycle_parent_nolock()
2944 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); in clk_core_update_duty_cycle_parent_nolock()
2946 clk_core_reset_duty_cycle_nolock(core); in clk_core_update_duty_cycle_parent_nolock()
2952 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2955 static int clk_core_set_duty_cycle_nolock(struct clk_core *core, in clk_core_set_duty_cycle_nolock() argument
2962 if (clk_core_rate_is_protected(core)) in clk_core_set_duty_cycle_nolock()
2965 trace_clk_set_duty_cycle(core, duty); in clk_core_set_duty_cycle_nolock()
2967 if (!core->ops->set_duty_cycle) in clk_core_set_duty_cycle_nolock()
2968 return clk_core_set_duty_cycle_parent_nolock(core, duty); in clk_core_set_duty_cycle_nolock()
2970 ret = core->ops->set_duty_cycle(core->hw, duty); in clk_core_set_duty_cycle_nolock()
2972 memcpy(&core->duty, duty, sizeof(*duty)); in clk_core_set_duty_cycle_nolock()
2974 trace_clk_set_duty_cycle_complete(core, duty); in clk_core_set_duty_cycle_nolock()
2979 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, in clk_core_set_duty_cycle_parent_nolock() argument
2984 if (core->parent && in clk_core_set_duty_cycle_parent_nolock()
2985 core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) { in clk_core_set_duty_cycle_parent_nolock()
2986 ret = clk_core_set_duty_cycle_nolock(core->parent, duty); in clk_core_set_duty_cycle_parent_nolock()
2987 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); in clk_core_set_duty_cycle_parent_nolock()
3022 clk_core_rate_unprotect(clk->core); in clk_set_duty_cycle()
3024 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty); in clk_set_duty_cycle()
3027 clk_core_rate_protect(clk->core); in clk_set_duty_cycle()
3035 static int clk_core_get_scaled_duty_cycle(struct clk_core *core, in clk_core_get_scaled_duty_cycle() argument
3038 struct clk_duty *duty = &core->duty; in clk_core_get_scaled_duty_cycle()
3043 ret = clk_core_update_duty_cycle_nolock(core); in clk_core_get_scaled_duty_cycle()
3065 return clk_core_get_scaled_duty_cycle(clk->core, scale); in clk_get_scaled_duty_cycle()
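clk_set_duty_cycle() takes a numerator/denominator pair and clk_get_scaled_duty_cycle() reads it back scaled (here to percent); when a clk has no duty-cycle ops of its own and CLK_DUTY_CYCLE_PARENT is set, the *_parent_nolock helpers above forward the request to the parent. A hedged consumer sketch:

    int pct;

    ret = clk_set_duty_cycle(clk, 1, 3);            /* ask for a 1/3 high period */
    if (ret)
            return ret;

    pct = clk_get_scaled_duty_cycle(clk, 100);      /* roughly 33 on success */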
3086 /* true if clk->core pointers match. Avoid dereferencing garbage */ in clk_is_match()
3088 if (p->core == q->core) in clk_is_match()
3243 struct clk_core *core = data; in clk_rate_set() local
3247 ret = clk_core_set_rate_nolock(core, val); in clk_rate_set()
3257 struct clk_core *core = data; in clk_prepare_enable_set() local
3261 ret = clk_prepare_enable(core->hw->clk); in clk_prepare_enable_set()
3263 clk_disable_unprepare(core->hw->clk); in clk_prepare_enable_set()
3270 struct clk_core *core = data; in clk_prepare_enable_get() local
3272 *val = core->enable_count && core->prepare_count; in clk_prepare_enable_get()
3286 struct clk_core *core = data; in clk_rate_get() local
3289 *val = clk_core_get_rate_recalc(core); in clk_rate_get()
3319 struct clk_core *core = s->private; in clk_flags_show() local
3320 unsigned long flags = core->flags; in clk_flags_show()
3338 static void possible_parent_show(struct seq_file *s, struct clk_core *core, in possible_parent_show() argument
3355 parent = clk_core_get_parent_by_index(core, i); in possible_parent_show()
3358 else if (core->parents[i].name) in possible_parent_show()
3359 seq_puts(s, core->parents[i].name); in possible_parent_show()
3360 else if (core->parents[i].fw_name) in possible_parent_show()
3361 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name); in possible_parent_show()
3362 else if (core->parents[i].index >= 0) in possible_parent_show()
3364 of_clk_get_parent_name(core->of_node, in possible_parent_show()
3365 core->parents[i].index)); in possible_parent_show()
3374 struct clk_core *core = s->private; in possible_parents_show() local
3377 for (i = 0; i < core->num_parents - 1; i++) in possible_parents_show()
3378 possible_parent_show(s, core, i, ' '); in possible_parents_show()
3380 possible_parent_show(s, core, i, '\n'); in possible_parents_show()
3388 struct clk_core *core = s->private; in current_parent_show() local
3390 if (core->parent) in current_parent_show()
3391 seq_printf(s, "%s\n", core->parent->name); in current_parent_show()
3402 struct clk_core *core = s->private; in current_parent_write() local
3411 parent = clk_core_get_parent_by_index(core, idx); in current_parent_write()
3416 err = clk_core_set_parent_nolock(core, parent); in current_parent_write()
3435 struct clk_core *core = s->private; in clk_duty_cycle_show() local
3436 struct clk_duty *duty = &core->duty; in clk_duty_cycle_show()
3446 struct clk_core *core = s->private; in clk_min_rate_show() local
3450 clk_core_get_boundaries(core, &min_rate, &max_rate); in clk_min_rate_show()
3460 struct clk_core *core = s->private; in clk_max_rate_show() local
3464 clk_core_get_boundaries(core, &min_rate, &max_rate); in clk_max_rate_show()
3472 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) in clk_debug_create_one() argument
3476 if (!core || !pdentry) in clk_debug_create_one()
3479 root = debugfs_create_dir(core->name, pdentry); in clk_debug_create_one()
3480 core->dentry = root; in clk_debug_create_one()
3482 debugfs_create_file("clk_rate", clk_rate_mode, root, core, in clk_debug_create_one()
3484 debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops); in clk_debug_create_one()
3485 debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops); in clk_debug_create_one()
3486 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy); in clk_debug_create_one()
3487 debugfs_create_u32("clk_phase", 0444, root, &core->phase); in clk_debug_create_one()
3488 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops); in clk_debug_create_one()
3489 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count); in clk_debug_create_one()
3490 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count); in clk_debug_create_one()
3491 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count); in clk_debug_create_one()
3492 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count); in clk_debug_create_one()
3493 debugfs_create_file("clk_duty_cycle", 0444, root, core, in clk_debug_create_one()
3496 debugfs_create_file("clk_prepare_enable", 0644, root, core, in clk_debug_create_one()
3499 if (core->num_parents > 1) in clk_debug_create_one()
3500 debugfs_create_file("clk_parent", 0644, root, core, in clk_debug_create_one()
3504 if (core->num_parents > 0) in clk_debug_create_one()
3505 debugfs_create_file("clk_parent", 0444, root, core, in clk_debug_create_one()
3508 if (core->num_parents > 1) in clk_debug_create_one()
3509 debugfs_create_file("clk_possible_parents", 0444, root, core, in clk_debug_create_one()
3512 if (core->ops->debug_init) in clk_debug_create_one()
3513 core->ops->debug_init(core->hw, core->dentry); in clk_debug_create_one()
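The .debug_init hook invoked on the last line lets a provider hang extra entries under the per-clk debugfs directory built above; note that the clk_rate and clk_prepare_enable write handlers earlier in this listing are only compiled in when clk.c is built with CLOCK_ALLOW_WRITE_DEBUGFS defined. A hedged provider sketch, assuming the foo_clk wrapper gained a hypothetical u32 raw_div field:

    static void foo_clk_debug_init(struct clk_hw *hw, struct dentry *dentry)
    {
            struct foo_clk *foo = container_of(hw, struct foo_clk, hw);

            /* appears next to clk_rate, clk_flags, ... for this clk */
            debugfs_create_u32("foo_raw_div", 0444, dentry, &foo->raw_div);
    }

    static const struct clk_ops foo_div_dbg_ops = {
            /* ... the usual rate/gate ops ... */
            .debug_init = foo_clk_debug_init,
    };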
3518 * @core: the clk being added to the debugfs clk directory
3524 static void clk_debug_register(struct clk_core *core) in clk_debug_register() argument
3527 hlist_add_head(&core->debug_node, &clk_debug_list); in clk_debug_register()
3529 clk_debug_create_one(core, rootdir); in clk_debug_register()
3535 * @core: the clk being removed from the debugfs clk directory
3541 static void clk_debug_unregister(struct clk_core *core) in clk_debug_unregister() argument
3544 hlist_del_init(&core->debug_node); in clk_debug_unregister()
3545 debugfs_remove_recursive(core->dentry); in clk_debug_unregister()
3546 core->dentry = NULL; in clk_debug_unregister()
3561 struct clk_core *core; in clk_debug_init() local
3593 hlist_for_each_entry(core, &clk_debug_list, debug_node) in clk_debug_init()
3594 clk_debug_create_one(core, rootdir); in clk_debug_init()
3603 static inline void clk_debug_register(struct clk_core *core) { } in clk_debug_register() argument
3604 static inline void clk_debug_unregister(struct clk_core *core) in clk_debug_unregister() argument
3652 * @core: clk_core being initialized
3657 static int __clk_core_init(struct clk_core *core) in __clk_core_init() argument
3667 * Set hw->core after grabbing the prepare_lock to synchronize with in __clk_core_init()
3668 * callers of clk_core_fill_parent_index() where we treat hw->core in __clk_core_init()
3672 core->hw->core = core; in __clk_core_init()
3674 ret = clk_pm_runtime_get(core); in __clk_core_init()
3679 if (clk_core_lookup(core->name)) { in __clk_core_init()
3681 __func__, core->name); in __clk_core_init()
3687 if (core->ops->set_rate && in __clk_core_init()
3688 !((core->ops->round_rate || core->ops->determine_rate) && in __clk_core_init()
3689 core->ops->recalc_rate)) { in __clk_core_init()
3691 __func__, core->name); in __clk_core_init()
3696 if (core->ops->set_parent && !core->ops->get_parent) { in __clk_core_init()
3698 __func__, core->name); in __clk_core_init()
3703 if (core->num_parents > 1 && !core->ops->get_parent) { in __clk_core_init()
3705 __func__, core->name); in __clk_core_init()
3710 if (core->ops->set_rate_and_parent && in __clk_core_init()
3711 !(core->ops->set_parent && core->ops->set_rate)) { in __clk_core_init()
3713 __func__, core->name); in __clk_core_init()
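These checks encode the minimum ops contract enforced at registration: .set_rate needs .recalc_rate plus either .round_rate or .determine_rate, .set_parent needs .get_parent, any multi-parent clk needs .get_parent, and .set_rate_and_parent needs both individual ops. A hedged sketch of an ops set that satisfies them (all foo_div_* callbacks hypothetical):

    static const struct clk_ops foo_div_ops = {
            .recalc_rate    = foo_div_recalc_rate,          /* required alongside .set_rate */
            .determine_rate = foo_div_determine_rate,       /* or .round_rate */
            .set_rate       = foo_div_set_rate,
    };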
3732 if (core->ops->init) { in __clk_core_init()
3733 ret = core->ops->init(core->hw); in __clk_core_init()
3738 parent = core->parent = __clk_init_parent(core); in __clk_core_init()
3741 * Populate core->parent if parent has already been clk_core_init'd. If in __clk_core_init()
3751 hlist_add_head(&core->child_node, &parent->children); in __clk_core_init()
3752 core->orphan = parent->orphan; in __clk_core_init()
3753 } else if (!core->num_parents) { in __clk_core_init()
3754 hlist_add_head(&core->child_node, &clk_root_list); in __clk_core_init()
3755 core->orphan = false; in __clk_core_init()
3757 hlist_add_head(&core->child_node, &clk_orphan_list); in __clk_core_init()
3758 core->orphan = true; in __clk_core_init()
3768 if (core->ops->recalc_accuracy) in __clk_core_init()
3769 core->accuracy = core->ops->recalc_accuracy(core->hw, in __clk_core_init()
3772 core->accuracy = parent->accuracy; in __clk_core_init()
3774 core->accuracy = 0; in __clk_core_init()
3781 phase = clk_core_get_phase(core); in __clk_core_init()
3785 core->name); in __clk_core_init()
3792 clk_core_update_duty_cycle_nolock(core); in __clk_core_init()
3800 if (core->ops->recalc_rate) in __clk_core_init()
3801 rate = core->ops->recalc_rate(core->hw, in __clk_core_init()
3807 core->rate = core->req_rate = rate; in __clk_core_init()
3814 if (core->flags & CLK_IS_CRITICAL) { in __clk_core_init()
3815 ret = clk_core_prepare(core); in __clk_core_init()
3818 __func__, core->name); in __clk_core_init()
3822 ret = clk_core_enable_lock(core); in __clk_core_init()
3825 __func__, core->name); in __clk_core_init()
3826 clk_core_unprepare(core); in __clk_core_init()
3833 kref_init(&core->ref); in __clk_core_init()
3835 clk_pm_runtime_put(core); in __clk_core_init()
3838 hlist_del_init(&core->child_node); in __clk_core_init()
3839 core->hw->core = NULL; in __clk_core_init()
3845 clk_debug_register(core); in __clk_core_init()
3852 * @core: clk to add consumer to
3855 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk) in clk_core_link_consumer() argument
3858 hlist_add_head(&clk->clks_node, &core->clks); in clk_core_link_consumer()
3874 * @core: clk to allocate a consumer for
3880 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id, in alloc_clk() argument
3889 clk->core = core; in alloc_clk()
3926 struct clk_core *core; in clk_hw_create_clk() local
3932 core = hw->core; in clk_hw_create_clk()
3933 clk = alloc_clk(core, dev_id, con_id); in clk_hw_create_clk()
3938 if (!try_module_get(core->owner)) { in clk_hw_create_clk()
3943 kref_get(&core->ref); in clk_hw_create_clk()
3944 clk_core_link_consumer(core, clk); in clk_hw_create_clk()
3961 struct device *dev = hw->core->dev; in clk_hw_get_clk()
3985 static int clk_core_populate_parent_map(struct clk_core *core, in clk_core_populate_parent_map() argument
4003 core->parents = parents; in clk_core_populate_parent_map()
4014 __func__, core->name); in clk_core_populate_parent_map()
4047 static void clk_core_free_parent_map(struct clk_core *core) in clk_core_free_parent_map() argument
4049 int i = core->num_parents; in clk_core_free_parent_map()
4051 if (!core->num_parents) in clk_core_free_parent_map()
4055 kfree_const(core->parents[i].name); in clk_core_free_parent_map()
4056 kfree_const(core->parents[i].fw_name); in clk_core_free_parent_map()
4059 kfree(core->parents); in clk_core_free_parent_map()
4066 struct clk_core *core; in __clk_register() local
4072 * we catch use of hw->init early on in the core. in __clk_register()
4076 core = kzalloc(sizeof(*core), GFP_KERNEL); in __clk_register()
4077 if (!core) { in __clk_register()
4082 core->name = kstrdup_const(init->name, GFP_KERNEL); in __clk_register()
4083 if (!core->name) { in __clk_register()
4092 core->ops = init->ops; in __clk_register()
4095 core->rpm_enabled = true; in __clk_register()
4096 core->dev = dev; in __clk_register()
4097 core->of_node = np; in __clk_register()
4099 core->owner = dev->driver->owner; in __clk_register()
4100 core->hw = hw; in __clk_register()
4101 core->flags = init->flags; in __clk_register()
4102 core->num_parents = init->num_parents; in __clk_register()
4103 core->min_rate = 0; in __clk_register()
4104 core->max_rate = ULONG_MAX; in __clk_register()
4106 ret = clk_core_populate_parent_map(core, init); in __clk_register()
4110 INIT_HLIST_HEAD(&core->clks); in __clk_register()
4116 hw->clk = alloc_clk(core, NULL, NULL); in __clk_register()
4122 clk_core_link_consumer(core, hw->clk); in __clk_register()
4124 ret = __clk_core_init(core); in __clk_register()
4136 clk_core_free_parent_map(core); in __clk_register()
4139 kfree_const(core->name); in __clk_register()
4141 kfree(core); in __clk_register()
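__clk_register() is where clk_register()/clk_hw_register() and their devm variants end up: it duplicates the name, copies the parent map out of struct clk_init_data and hands the new clk_core to __clk_core_init(), so the init data may live on the stack. A hedged provider-side sketch of filling it in (the names and the "ref" fw_name are illustrative):

    static const struct clk_parent_data foo_pll_parents[] = {
            { .fw_name = "ref" },                   /* matches a DT clock-names entry */
    };

    struct clk_init_data init = {
            .name           = "foo_pll",            /* must be unique, see the lookup in __clk_core_init() */
            .ops            = &foo_pll_ops,
            .parent_data    = foo_pll_parents,
            .num_parents    = ARRAY_SIZE(foo_pll_parents),
    };

    foo->hw.init = &init;
    ret = devm_clk_hw_register(dev, &foo->hw);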
4224 struct clk_core *core = container_of(ref, struct clk_core, ref); in __clk_release() local
4228 clk_core_free_parent_map(core); in __clk_release()
4229 kfree_const(core->name); in __clk_release()
4230 kfree(core); in __clk_release()
4275 if (root->parents[i].core == target) in clk_core_evict_parent_cache_subtree()
4276 root->parents[i].core = NULL; in clk_core_evict_parent_cache_subtree()
4283 static void clk_core_evict_parent_cache(struct clk_core *core) in clk_core_evict_parent_cache() argument
4292 clk_core_evict_parent_cache_subtree(root, core); in clk_core_evict_parent_cache()
4308 clk_debug_unregister(clk->core); in clk_unregister()
4312 ops = clk->core->ops; in clk_unregister()
4315 clk->core->name); in clk_unregister()
4323 clk->core->ops = &clk_nodrv_ops; in clk_unregister()
4327 ops->terminate(clk->core->hw); in clk_unregister()
4329 if (!hlist_empty(&clk->core->children)) { in clk_unregister()
4334 hlist_for_each_entry_safe(child, t, &clk->core->children, in clk_unregister()
4339 clk_core_evict_parent_cache(clk->core); in clk_unregister()
4341 hlist_del_init(&clk->core->child_node); in clk_unregister()
4343 if (clk->core->prepare_count) in clk_unregister()
4345 __func__, clk->core->name); in clk_unregister()
4347 if (clk->core->protect_count) in clk_unregister()
4349 __func__, clk->core->name); in clk_unregister()
4351 kref_put(&clk->core->ref, __clk_release); in clk_unregister()
4464 WARN_ON_ONCE(dev != hw->core->dev); in devm_clk_hw_get_clk()
4502 clk->core->protect_count -= (clk->exclusive_count - 1); in __clk_put()
4503 clk_core_rate_unprotect(clk->core); in __clk_put()
4513 owner = clk->core->owner; in __clk_put()
4514 kref_put(&clk->core->ref, __clk_release); in __clk_put()
4573 clk->core->notifier_count++; in clk_notifier_register()
4607 clk->core->notifier_count--; in clk_notifier_unregister()
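clk_notifier_register()/clk_notifier_unregister() maintain the per-clk notifier_count that the rate-change paths above test before sending PRE_RATE_CHANGE, POST_RATE_CHANGE or ABORT_RATE_CHANGE. A hedged consumer sketch (foo_update_dividers() and the foo->clk_nb field are hypothetical):

    static int foo_clk_notify(struct notifier_block *nb, unsigned long event, void *data)
    {
            struct clk_notifier_data *ndata = data;

            if (event == POST_RATE_CHANGE)
                    foo_update_dividers(ndata->new_rate);

            return NOTIFY_OK;
    }

    /* probe: */
    foo->clk_nb.notifier_call = foo_clk_notify;
    ret = clk_notifier_register(clk, &foo->clk_nb);

    /* remove: */
    clk_notifier_unregister(clk, &foo->clk_nb);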