Lines Matching +full:step +full:- +full:down
34 #include <linux/percpu-rwsem.h>
44 * struct cpuhp_cpu_state - Per cpu hotplug state storage
54 * @node: Remote CPU node; for multi-instance, do a
55 * single entry callback for install/remove
56 * @last: For multi-instance rollback, remember how far we got
59 * @done_up: Signal completion to the issuer of the task for cpu-up
60 * @done_down: Signal completion to the issuer of the task for cpu-down
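
The fields above live in one per-CPU instance. A minimal sketch of that pattern, using a reduced stand-in struct (my_* names are hypothetical; only fields documented above are kept), mirroring what cpuhp_create() does for the real structure:

#include <linux/completion.h>
#include <linux/percpu.h>

/* Reduced stand-in for the private struct; fields from the doc above */
struct my_cpu_state {
	int state;			/* current state */
	int target;			/* state the CPU is moving toward */
	struct completion done_up;	/* signals the issuer of a cpu-up */
	struct completion done_down;	/* signals the issuer of a cpu-down */
};

static DEFINE_PER_CPU(struct my_cpu_state, my_state);

static void my_init_cpu_state(unsigned int cpu)
{
	struct my_cpu_state *st = per_cpu_ptr(&my_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}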
92 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
94 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
114 * struct cpuhp_step - Hotplug state machine step
115 * @name: Name of the step
116 * @startup: Startup function of the step
117 * @teardown: Teardown function of the step
118 * @cant_stop: Bringup/teardown can't be stopped at this step
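
For reference, this is how a subsystem typically installs such a startup/teardown step through the public API. A minimal sketch, assuming a dynamic state and hypothetical my_* callbacks:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state my_state;

static int my_online(unsigned int cpu)
{
	pr_info("cpu %u coming up\n", cpu);
	return 0;	/* a non-zero return rolls the bringup back */
}

static int my_offline(unsigned int cpu)
{
	pr_info("cpu %u going down\n", cpu);
	return 0;
}

static int __init my_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN lets cpuhp_reserve_state() pick a free slot */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				my_online, my_offline);
	if (ret < 0)
		return ret;
	my_state = ret;	/* dynamic requests return the allocated state */
	return 0;
}

static void __exit my_exit(void)
{
	cpuhp_remove_state(my_state);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");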
148 static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step) in cpuhp_step_empty() argument
150 return bringup ? !step->startup.single : !step->teardown.single; in cpuhp_step_empty()
154 * cpuhp_invoke_callback - Invoke the callbacks for a given state
158 * @node: For multi-instance, do a single entry callback for install/remove
159 * @lastp: For multi-instance rollback, remember how far we got
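
The @node/@lastp parameters serve the multi-instance flavour: each registered hlist_node gets its own callback invocation per CPU, and on failure the core rolls back through @lastp. A sketch of registering one instance, again with hypothetical my_* names:

#include <linux/cpuhotplug.h>
#include <linux/kernel.h>

struct my_instance {
	struct hlist_node node;	/* handed back to the callbacks */
	int id;
};

static enum cpuhp_state my_multi_state;

static int my_inst_online(unsigned int cpu, struct hlist_node *node)
{
	struct my_instance *inst = container_of(node, struct my_instance, node);

	pr_info("cpu %u up for instance %d\n", cpu, inst->id);
	return 0;
}

static int my_inst_offline(unsigned int cpu, struct hlist_node *node)
{
	return 0;
}

static int my_multi_register(struct my_instance *inst)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "example:multi",
				      my_inst_online, my_inst_offline);
	if (ret < 0)
		return ret;
	my_multi_state = ret;
	/* runs my_inst_online() for this node on every online CPU */
	return cpuhp_state_add_instance(my_multi_state, &inst->node);
}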
170 struct cpuhp_step *step = cpuhp_get_step(state); in cpuhp_invoke_callback() local
175 if (st->fail == state) { in cpuhp_invoke_callback()
176 st->fail = CPUHP_INVALID; in cpuhp_invoke_callback()
177 return -EAGAIN; in cpuhp_invoke_callback()
180 if (cpuhp_step_empty(bringup, step)) { in cpuhp_invoke_callback()
185 if (!step->multi_instance) { in cpuhp_invoke_callback()
187 cb = bringup ? step->startup.single : step->teardown.single; in cpuhp_invoke_callback()
189 trace_cpuhp_enter(cpu, st->target, state, cb); in cpuhp_invoke_callback()
191 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
194 cbm = bringup ? step->startup.multi : step->teardown.multi; in cpuhp_invoke_callback()
199 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
201 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
207 hlist_for_each(node, &step->list) { in cpuhp_invoke_callback()
211 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
213 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
228 cbm = !bringup ? step->startup.multi : step->teardown.multi; in cpuhp_invoke_callback()
232 hlist_for_each(node, &step->list) { in cpuhp_invoke_callback()
233 if (!cnt--) in cpuhp_invoke_callback()
236 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
238 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
259 struct completion *done = bringup ? &st->done_up : &st->done_down; in wait_for_ap_thread()
265 struct completion *done = bringup ? &st->done_up : &st->done_down; in complete_ap_thread()
297 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
384 cpu_hotplug_disabled--; in __cpu_hotplug_enable()
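
A sketch of the pairing, with a hypothetical caller: while the count is non-zero, cpu_up()/cpu_down() return -EBUSY. When only mutual exclusion against a hotplug in flight is needed, cpus_read_lock() is the lighter-weight tool.

#include <linux/cpu.h>

static void my_no_hotplug_section(void)
{
	cpu_hotplug_disable();	/* bumps cpu_hotplug_disabled */
	/* ... work that must not race with explicit cpu_up()/cpu_down() ... */
	cpu_hotplug_enable();	/* drops the count again */
}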
408 * Architectures that need SMT-specific errata handling during SMT hotplug
409 * should override this.
478 enum cpuhp_state prev_state = st->state; in cpuhp_set_state()
479 bool bringup = st->state < target; in cpuhp_set_state()
481 st->rollback = false; in cpuhp_set_state()
482 st->last = NULL; in cpuhp_set_state()
484 st->target = target; in cpuhp_set_state()
485 st->single = false; in cpuhp_set_state()
486 st->bringup = bringup; in cpuhp_set_state()
487 if (cpu_dying(st->cpu) != !bringup) in cpuhp_set_state()
488 set_cpu_dying(st->cpu, !bringup); in cpuhp_set_state()
496 bool bringup = !st->bringup; in cpuhp_reset_state()
498 st->target = prev_state; in cpuhp_reset_state()
504 if (st->rollback) in cpuhp_reset_state()
507 st->rollback = true; in cpuhp_reset_state()
510 * If we have st->last we need to undo partial multi_instance of this in cpuhp_reset_state()
511 * state first. Otherwise start undo at the previous state. in cpuhp_reset_state()
513 if (!st->last) { in cpuhp_reset_state()
514 if (st->bringup) in cpuhp_reset_state()
515 st->state--; in cpuhp_reset_state()
517 st->state++; in cpuhp_reset_state()
520 st->bringup = bringup; in cpuhp_reset_state()
521 if (cpu_dying(st->cpu) != !bringup) in cpuhp_reset_state()
522 set_cpu_dying(st->cpu, !bringup); in cpuhp_reset_state()
528 if (!st->single && st->state == st->target) in __cpuhp_kick_ap()
531 st->result = 0; in __cpuhp_kick_ap()
537 st->should_run = true; in __cpuhp_kick_ap()
538 wake_up_process(st->thread); in __cpuhp_kick_ap()
539 wait_for_ap_thread(st, st->bringup); in __cpuhp_kick_ap()
549 if ((ret = st->result)) { in cpuhp_kick_ap()
564 return -ECANCELED; in bringup_wait_for_ap()
567 kthread_unpark(st->thread); in bringup_wait_for_ap()
577 return -ECANCELED; in bringup_wait_for_ap()
579 if (st->target <= CPUHP_AP_ONLINE_IDLE) in bringup_wait_for_ap()
582 return cpuhp_kick_ap(st, st->target); in bringup_wait_for_ap()
597 /* Arch-specific enabling code. */ in bringup_cpu()
608 struct mm_struct *mm = idle->active_mm; in finish_cpu()
615 idle->active_mm = &init_mm; in finish_cpu()
628 * st->state will be modified ahead of time, to match state_to_run, as if it
629 * has already run.
638 if (st->state >= target) in cpuhp_next_state()
641 *state_to_run = ++st->state; in cpuhp_next_state()
643 if (st->state <= target) in cpuhp_next_state()
646 *state_to_run = st->state--; in cpuhp_next_state()
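
The pre-increment on the way up versus post-decrement on the way down is the subtle part. A small userspace model of that stepping (next_state() is a hypothetical simplification of cpuhp_next_state(), with plain ints for the enum):

#include <stdbool.h>
#include <stdio.h>

static bool next_state(bool bringup, int *state_to_run, int *state, int target)
{
	if (bringup) {
		if (*state >= target)
			return false;
		*state_to_run = ++(*state);	/* up: runs ++state */
	} else {
		if (*state <= target)
			return false;
		*state_to_run = (*state)--;	/* down: runs state-- */
	}
	return true;
}

int main(void)
{
	int state = 0, run;

	while (next_state(true, &run, &state, 3))
		printf("up: run state %d\n", run);	/* prints 1, 2, 3 */
	while (next_state(false, &run, &state, 0))
		printf("down: run state %d\n", run);	/* prints 3, 2, 1 */
	return 0;
}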
678 * When CPU hotplug is disabled, then taking the CPU down is not in can_rollback_cpu()
679 * possible because takedown_cpu() and the architecture/subsystem in can_rollback_cpu()
680 * specific mechanisms are not available. So the CPU which would be in can_rollback_cpu()
681 * completely unplugged again needs to stay around in the current in can_rollback_cpu()
682 * state. in can_rollback_cpu()
684 return st->state <= CPUHP_BRINGUP_CPU; in can_rollback_cpu()
690 enum cpuhp_state prev_state = st->state; in cpuhp_up_callbacks()
696 ret, cpu, cpuhp_get_step(st->state)->name, in cpuhp_up_callbacks()
697 st->state); in cpuhp_up_callbacks()
714 init_completion(&st->done_up); in cpuhp_create()
715 init_completion(&st->done_down); in cpuhp_create()
716 st->cpu = cpu; in cpuhp_create()
723 return st->should_run; in cpuhp_should_run()
734 * - single: runs st->cb_state
735 * - up: runs ++st->state, while st->state < st->target
736 * - down: runs st->state--, while st->state > st->target
743 bool bringup = st->bringup; in cpuhp_thread_fun()
746 if (WARN_ON_ONCE(!st->should_run)) in cpuhp_thread_fun()
750 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures in cpuhp_thread_fun()
751 * that if we see ->should_run we also see the rest of the state. in cpuhp_thread_fun()
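
A generic sketch of the publish/consume pattern that comment describes (illustrative my_* names; the real code uses its own barriers, not necessarily these primitives):

#include <linux/atomic.h>

static int my_payload;
static int my_should_run;

static void my_kick(void)
{
	WRITE_ONCE(my_payload, 42);		/* write the state first */
	smp_store_release(&my_should_run, 1);	/* then publish the flag */
}

static int my_thread_step(void)
{
	if (!smp_load_acquire(&my_should_run))	/* ACQUIRE pairs with the release */
		return -1;
	return READ_ONCE(my_payload);		/* guaranteed to observe 42 */
}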
763 if (st->single) { in cpuhp_thread_fun()
764 state = st->cb_state; in cpuhp_thread_fun()
765 st->should_run = false; in cpuhp_thread_fun()
767 st->should_run = cpuhp_next_state(bringup, &state, st, st->target); in cpuhp_thread_fun()
768 if (!st->should_run) in cpuhp_thread_fun()
776 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); in cpuhp_thread_fun()
782 WARN_ON_ONCE(st->result); in cpuhp_thread_fun()
784 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); in cpuhp_thread_fun()
787 if (st->result) { in cpuhp_thread_fun()
793 WARN_ON_ONCE(st->rollback); in cpuhp_thread_fun()
794 st->should_run = false; in cpuhp_thread_fun()
801 if (!st->should_run) in cpuhp_thread_fun()
826 if (!st->thread) in cpuhp_invoke_ap_callback()
829 st->rollback = false; in cpuhp_invoke_ap_callback()
830 st->last = NULL; in cpuhp_invoke_ap_callback()
832 st->node = node; in cpuhp_invoke_ap_callback()
833 st->bringup = bringup; in cpuhp_invoke_ap_callback()
834 st->cb_state = state; in cpuhp_invoke_ap_callback()
835 st->single = true; in cpuhp_invoke_ap_callback()
842 if ((ret = st->result) && st->last) { in cpuhp_invoke_ap_callback()
843 st->rollback = true; in cpuhp_invoke_ap_callback()
844 st->bringup = !bringup; in cpuhp_invoke_ap_callback()
853 st->node = st->last = NULL; in cpuhp_invoke_ap_callback()
860 enum cpuhp_state prev_state = st->state; in cpuhp_kick_ap_work()
869 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work); in cpuhp_kick_ap_work()
870 ret = cpuhp_kick_ap(st, st->target); in cpuhp_kick_ap_work()
871 trace_cpuhp_exit(cpu, st->state, prev_state, ret); in cpuhp_kick_ap_work()
943 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
948 * trivial, there are various non-obvious corner cases, which this function
949 * tries to solve in a safe manner.
959 * This function is called after the cpu is taken down and marked in clear_tasks_mm_cpumask()
960 * offline, so it's not like new tasks will ever get this cpu set in in clear_tasks_mm_cpumask()
961 * their mm mask. -- Peter Zijlstra in clear_tasks_mm_cpumask()
962 * Thus, we may use rcu_read_lock() here, instead of grabbing in clear_tasks_mm_cpumask()
963 * full-fledged tasklist_lock. in clear_tasks_mm_cpumask()
977 arch_clear_mm_cpumask_cpu(cpu, t->mm); in clear_tasks_mm_cpumask()
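
A condensed sketch of the walk those comments justify (assumed shape, not a verbatim copy; the arch hook seen above defaults to clearing the bit in mm_cpumask()):

#include <linux/cpumask.h>
#include <linux/oom.h>		/* find_lock_task_mm() */
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>	/* for_each_process() */

static void my_clear_mm_cpumask(int cpu)
{
	struct task_struct *p;

	WARN_ON(cpu_online(cpu));	/* relaxed locking: CPU must be dead */

	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t = find_lock_task_mm(p);

		if (!t)
			continue;	/* no live mm user in this thread group */
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}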
983 /* Take this CPU down. */
987 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE); in take_cpu_down()
997 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going in take_cpu_down()
998 * down, that the current state is CPUHP_TEARDOWN_CPU - 1. in take_cpu_down()
1000 WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1)); in take_cpu_down()
1025 kthread_park(st->thread); in takedown_cpu()
1041 kthread_unpark(st->thread); in takedown_cpu()
1054 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); in takedown_cpu()
1079 BUG_ON(st->state != CPUHP_AP_OFFLINE); in cpuhp_report_idle_dead()
1081 st->state = CPUHP_AP_IDLE_DEAD; in cpuhp_report_idle_dead()
1093 enum cpuhp_state prev_state = st->state; in cpuhp_down_callbacks()
1098 pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n", in cpuhp_down_callbacks()
1099 ret, cpu, cpuhp_get_step(st->state)->name, in cpuhp_down_callbacks()
1100 st->state); in cpuhp_down_callbacks()
1104 if (st->state < prev_state) in cpuhp_down_callbacks()
1120 return -EBUSY; in _cpu_down()
1123 return -EINVAL; in _cpu_down()
1134 if (st->state > CPUHP_TEARDOWN_CPU) { in _cpu_down()
1135 st->target = max((int)target, CPUHP_TEARDOWN_CPU); in _cpu_down()
1148 if (st->state > CPUHP_TEARDOWN_CPU) in _cpu_down()
1151 st->target = target; in _cpu_down()
1154 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need in _cpu_down()
1155 * to do the further cleanups. in _cpu_down()
1158 if (ret && st->state < prev_state) { in _cpu_down()
1159 if (st->state == CPUHP_TEARDOWN_CPU) { in _cpu_down()
1182 return -EBUSY; in cpu_down_maps_locked()
1197 * cpu_device_down - Bring down a cpu device
1208 return cpu_down(dev->id, CPUHP_OFFLINE); in cpu_device_down()
1244 pr_err("Failed to offline CPU%d - error=%d", in smp_shutdown_nonboot_cpus()
1270 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1279 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); in notify_cpu_starting()
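
For orientation, a simplified composite of where notify_cpu_starting() sits in an architecture's secondary-CPU entry path (real arch code differs in detail; my_secondary_entry() is invented):

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/smp.h>

static void my_secondary_entry(void)
{
	unsigned int cpu = smp_processor_id();

	notify_cpu_starting(cpu);	/* run the STARTING-section callbacks */
	set_cpu_online(cpu, true);	/* becomes visible to the waiting BP */
	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);	/* does not return */
}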
1311 st->state = CPUHP_AP_ONLINE_IDLE; in cpuhp_online_idle()
1325 ret = -EINVAL; in _cpu_up()
1333 if (st->state >= target) in _cpu_up()
1336 if (st->state == CPUHP_OFFLINE) { in _cpu_up()
1352 if (st->state > CPUHP_BRINGUP_CPU) { in _cpu_up()
1381 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n", in cpu_up()
1386 return -EINVAL; in cpu_up()
1396 err = -EBUSY; in cpu_up()
1400 err = -EPERM; in cpu_up()
1411 * cpu_device_up - Bring up a cpu device
1422 return cpu_up(dev->id, CPUHP_ONLINE); in cpu_device_up()
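
Both device wrappers back the sysfs "online" attribute. A sketch of driving them from a bare CPU number (my_set_cpu() is hypothetical):

#include <linux/cpu.h>

static int my_set_cpu(unsigned int cpu, bool online)
{
	struct device *dev = get_cpu_device(cpu);

	if (!dev)
		return -ENODEV;
	return online ? cpu_device_up(dev) : cpu_device_down(dev);
}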
1438 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1455 pr_err("Failed to bring hibernate-CPU up!\n"); in bringup_hibernate_cpu()
1482 if (primary == -1) { in freeze_secondary_cpus()
1492 * We take down all of the non-boot CPUs in one shot to avoid races in freeze_secondary_cpus()
1497 pr_info("Disabling non-boot CPUs ...\n"); in freeze_secondary_cpus()
1504 error = -EBUSY; in freeze_secondary_cpus()
1514 pr_err("Error taking CPU%d down: %d\n", cpu, error); in freeze_secondary_cpus()
1522 pr_err("Non-boot CPUs are not disabled\n"); in freeze_secondary_cpus()
1553 pr_info("Enabling non-boot CPUs ...\n"); in thaw_secondary_cpus()
1578 return -ENOMEM; in alloc_frozen_cpus()
1686 * On the tear-down path, timers_dead_cpu() must be invoked
1809 return -EINVAL; in cpuhp_cb_check()
1821 struct cpuhp_step *step; in cpuhp_reserve_state() local
1825 step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN; in cpuhp_reserve_state()
1829 step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN; in cpuhp_reserve_state()
1833 return -EINVAL; in cpuhp_reserve_state()
1836 for (i = state; i <= end; i++, step++) { in cpuhp_reserve_state()
1837 if (!step->name) in cpuhp_reserve_state()
1841 return -ENOSPC; in cpuhp_reserve_state()
1870 if (name && sp->name) in cpuhp_store_callbacks()
1871 return -EBUSY; in cpuhp_store_callbacks()
1873 sp->startup.single = startup; in cpuhp_store_callbacks()
1874 sp->teardown.single = teardown; in cpuhp_store_callbacks()
1875 sp->name = name; in cpuhp_store_callbacks()
1876 sp->multi_instance = multi_instance; in cpuhp_store_callbacks()
1877 INIT_HLIST_HEAD(&sp->list); in cpuhp_store_callbacks()
1883 return cpuhp_get_step(state)->teardown.single; in cpuhp_get_teardown_cb()
1887 * Call the startup/teardown function for a step either on the AP or
1931 int cpustate = st->state; in cpuhp_rollback_install()
1953 if (sp->multi_instance == false) in __cpuhp_state_add_instance_cpuslocked()
1954 return -EINVAL; in __cpuhp_state_add_instance_cpuslocked()
1958 if (!invoke || !sp->startup.multi) in __cpuhp_state_add_instance_cpuslocked()
1967 int cpustate = st->state; in __cpuhp_state_add_instance_cpuslocked()
1974 if (sp->teardown.multi) in __cpuhp_state_add_instance_cpuslocked()
1981 hlist_add_head(node, &sp->list); in __cpuhp_state_add_instance_cpuslocked()
2000 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
2002 * @name: Name of the step
2029 return -EINVAL; in __cpuhp_setup_state_cpuslocked()
2051 int cpustate = st->state; in __cpuhp_setup_state_cpuslocked()
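
The _cpuslocked variant exists for callers that already hold the hotplug lock, where taking it again would deadlock. A sketch reusing the hypothetical my_online()/my_offline() callbacks from the earlier example:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static int my_setup_locked(void)
{
	int ret;

	cpus_read_lock();
	/* ... other work that needs a stable set of online CPUs ... */
	ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
					   "example:locked",
					   my_online, my_offline);
	cpus_read_unlock();
	return ret < 0 ? ret : 0;
}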
2100 if (!sp->multi_instance) in __cpuhp_state_remove_instance()
2101 return -EINVAL; in __cpuhp_state_remove_instance()
2115 int cpustate = st->state; in __cpuhp_state_remove_instance()
2131 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
2150 if (sp->multi_instance) { in __cpuhp_remove_state_cpuslocked()
2151 WARN(!hlist_empty(&sp->list), in __cpuhp_remove_state_cpuslocked()
2167 int cpustate = st->state; in __cpuhp_remove_state_cpuslocked()
2191 dev->offline = true; in cpuhp_offline_cpu_device()
2193 kobject_uevent(&dev->kobj, KOBJ_OFFLINE); in cpuhp_offline_cpu_device()
2200 dev->offline = false; in cpuhp_online_cpu_device()
2202 kobject_uevent(&dev->kobj, KOBJ_ONLINE); in cpuhp_online_cpu_device()
2262 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in state_show()
2264 return sprintf(buf, "%d\n", st->state); in state_show()
2271 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in target_store()
2281 return -EINVAL; in target_store()
2284 return -EINVAL; in target_store()
2293 ret = !sp->name || sp->cant_stop ? -EINVAL : 0; in target_store()
2298 if (st->state < target) in target_store()
2299 ret = cpu_up(dev->id, target); in target_store()
2301 ret = cpu_down(dev->id, target); in target_store()
2310 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in target_show()
2312 return sprintf(buf, "%d\n", st->target); in target_show()
2319 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in fail_store()
2328 st->fail = fail; in fail_store()
2333 return -EINVAL; in fail_store()
2339 return -EINVAL; in fail_store()
2347 if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU) in fail_store()
2348 return -EINVAL; in fail_store()
2355 if (!sp->startup.single && !sp->teardown.single) in fail_store()
2356 ret = -EINVAL; in fail_store()
2361 st->fail = fail; in fail_store()
2369 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in fail_show()
2371 return sprintf(buf, "%d\n", st->fail); in fail_show()
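
From userspace these attributes surface as /sys/devices/system/cpu/cpuN/hotplug/{state,target,fail}. A userspace sketch that reads the current state and arms the fail knob (CPU 1 chosen arbitrarily; fail_store() above may still reject the value):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu1/hotplug/state", "r");
	int state;

	if (!f || fscanf(f, "%d", &state) != 1)
		return 1;
	fclose(f);
	printf("cpu1 is at hotplug state %d\n", state);

	/* cpuhp_invoke_callback() returns -EAGAIN when it hits st->fail */
	f = fopen("/sys/devices/system/cpu/cpu1/hotplug/fail", "w");
	if (!f)
		return 1;
	fprintf(f, "%d\n", state);
	fclose(f);
	return 0;
}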
2399 if (sp->name) { in states_show()
2400 cur = sprintf(buf, "%3d: %s\n", i, sp->name); in states_show()
2436 return -EINVAL; in __store_smt_control()
2439 return -EPERM; in __store_smt_control()
2442 return -ENODEV; in __store_smt_control()
2469 return -ENODEV; in __store_smt_control()
2486 return snprintf(buf, PAGE_SIZE - 2, "%s\n", state); in control_show()
2499 return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active()); in active_show()
2517 return sysfs_create_group(&cpu_subsys.dev_root->kobj, in cpu_smt_sysfs_init()
2529 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj, in cpuhp_sysfs_init()
2539 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group); in cpuhp_sysfs_init()
2556 /* cpu_bit_bitmap[0] is empty - so we can back into it */
2619 * IPI/NMI broadcasts when shutting down CPUs. Invocation from in set_cpu_online()
2620 * regular CPU hotplug is properly serialized. in set_cpu_online()