Lines Matching +full:no +full:- +full:idle +full:- +full:on +full:- +full:init
1 // SPDX-License-Identifier: GPL-2.0
11 #include <linux/sched/idle.h>
15 #include <linux/init.h>
20 #include <linux/user-return-notifier.h>
26 #include <linux/elf-randomize.h>
44 #include <asm/spec-ctrl.h>
54 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
55 * no more per-task TSS's. The TSS size is kept cacheline-aligned
57 * section. Since TSS's are completely CPU-local, we want them
58 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
64 * privilege level. Since the init task never runs anything
65 * but ring 0 code, there is no need for a valid value here.
68 .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
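The .sp0 seed above is a deliberate poison value rather than a usable stack pointer: the init task only ever runs ring 0 code, so this entry-stack slot is never consumed. The arithmetic is easy to check by hand; a small user-space demonstration (purely illustrative, not kernel code) shows that on x86-64, where BITS_PER_LONG is 64, the expression lands in the non-canonical address range, so any accidental use would fault immediately:

#include <stdio.h>

int main(void)
{
        /* BITS_PER_LONG is 64 on x86-64 */
        unsigned long sp0 = (1UL << (64 - 1)) + 1;

        /* Prints 0x8000000000000001, a non-canonical address with
         * 48-bit (or 57-bit) virtual addressing. */
        printf("sp0 = %#lx\n", sp0);
        return 0;
}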
92 dst->thread.vm86 = NULL; in arch_dup_task_struct()
95 dst->thread.fpu.fpstate = NULL; in arch_dup_task_struct()
104 fpstate_free(&tsk->thread.fpu); in arch_release_task_struct()
113 struct thread_struct *t = &tsk->thread; in exit_thread()
114 struct fpu *fpu = &t->fpu; in exit_thread()
129 return do_set_thread_area(p, -1, utls, 0); in set_new_tls()
136 unsigned long clone_flags = args->flags; in copy_thread()
137 unsigned long sp = args->stack; in copy_thread()
138 unsigned long tls = args->tls; in copy_thread()
146 frame = &fork_frame->frame; in copy_thread()
148 frame->bp = encode_frame_pointer(childregs); in copy_thread()
149 frame->ret_addr = (unsigned long) ret_from_fork; in copy_thread()
150 p->thread.sp = (unsigned long) fork_frame; in copy_thread()
151 p->thread.io_bitmap = NULL; in copy_thread()
152 p->thread.iopl_warn = 0; in copy_thread()
153 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); in copy_thread()
157 p->thread.fsindex = current->thread.fsindex; in copy_thread()
158 p->thread.fsbase = current->thread.fsbase; in copy_thread()
159 p->thread.gsindex = current->thread.gsindex; in copy_thread()
160 p->thread.gsbase = current->thread.gsbase; in copy_thread()
162 savesegment(es, p->thread.es); in copy_thread()
163 savesegment(ds, p->thread.ds); in copy_thread()
165 p->thread.sp0 = (unsigned long) (childregs + 1); in copy_thread()
166 savesegment(gs, p->thread.gs); in copy_thread()
173 frame->flags = X86_EFLAGS_FIXED; in copy_thread()
176 fpu_clone(p, clone_flags, args->fn); in copy_thread()
179 if (unlikely(p->flags & PF_KTHREAD)) { in copy_thread()
180 p->thread.pkru = pkru_get_init_value(); in copy_thread()
182 kthread_frame_init(frame, args->fn, args->fn_arg); in copy_thread()
187 * Clone current's PKRU value from hardware. tsk->thread.pkru in copy_thread()
190 p->thread.pkru = read_pkru(); in copy_thread()
192 frame->bx = 0; in copy_thread()
194 childregs->ax = 0; in copy_thread()
196 childregs->sp = sp; in copy_thread()
198 if (unlikely(args->fn)) { in copy_thread()
209 childregs->sp = 0; in copy_thread()
210 childregs->ip = 0; in copy_thread()
211 kthread_frame_init(frame, args->fn, args->fn_arg); in copy_thread()
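On the kernel-thread path above, copy_thread() never builds user register state; it only arranges for ret_from_fork to call args->fn(args->fn_arg). The listing shows the two kthread_frame_init() call sites but not the helper itself; the following is a hedged sketch of what such a helper does, and the inactive_task_frame field choices (bx for the function, r12/di for the argument) are assumptions rather than text from this listing:

/* Sketch: stash the thread function and its argument in callee-saved
 * slots of the switch frame so ret_from_fork can call fn(arg). */
static inline void kthread_frame_init(struct inactive_task_frame *frame,
                                      int (*fn)(void *), void *arg)
{
        frame->bx = (unsigned long)fn;
#ifdef CONFIG_X86_32
        frame->di = (unsigned long)arg;
#else
        frame->r12 = (unsigned long)arg;
#endif
}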
239 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); in flush_thread()
288 return -EINVAL; in set_tsc_mode()
295 static void set_cpuid_faulting(bool on) in set_cpuid_faulting() argument
301 msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT); in set_cpuid_faulting()
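Line 301 shows only the OR step of set_cpuid_faulting(); the full operation is a read-modify-write of MSR_MISC_FEATURES_ENABLES so that the CPUID-fault bit tracks the requested state without disturbing other bits. A hedged sketch of that pattern follows, using rdmsrl()/wrmsrl() directly for clarity (the real code may work from a cached per-CPU copy of the MSR instead):

static void set_cpuid_faulting(bool on)
{
        u64 msrval;

        /* Read, clear the CPUID-fault bit, then set it if requested. */
        rdmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
        msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
        msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
        wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}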
340 return -ENODEV; in set_cpuid_mode()
355 /* If cpuid was previously disabled for this task, re-enable it. */ in arch_setup_new_exec()
379 * If the next task has an I/O bitmap it will handle it on exit to in switch_to_bitmap()
396 memcpy(tss->io_bitmap.bitmap, iobm->bitmap, in tss_copy_io_bitmap()
397 max(tss->io_bitmap.prev_max, iobm->max)); in tss_copy_io_bitmap()
403 tss->io_bitmap.prev_max = iobm->max; in tss_copy_io_bitmap()
404 tss->io_bitmap.prev_sequence = iobm->sequence; in tss_copy_io_bitmap()
408 * native_tss_update_io_bitmap - Update I/O bitmap before exiting to user mode
413 struct thread_struct *t = &current->thread; in native_tss_update_io_bitmap()
414 u16 *base = &tss->x86_tss.io_bitmap_base; in native_tss_update_io_bitmap()
421 if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) { in native_tss_update_io_bitmap()
424 struct io_bitmap *iobm = t->io_bitmap; in native_tss_update_io_bitmap()
430 if (tss->io_bitmap.prev_sequence != iobm->sequence) in native_tss_update_io_bitmap()
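Lines 396-430 together describe a lazy copy scheme: each CPU's TSS remembers the sequence number and size of the last I/O bitmap it installed, so returning to user space with an unchanged bitmap skips the memcpy entirely. Stitching the fragments into one flow (the glue lines are assumed, the field accesses come from the listing):

/* Sketch: refresh the hardware bitmap only when the task's bitmap
 * differs from what this CPU's TSS last installed. */
if (tss->io_bitmap.prev_sequence != iobm->sequence)
        tss_copy_io_bitmap(tss, iobm);

/* tss_copy_io_bitmap() copies at least as many bytes as the previous
 * bitmap covered, so ports that were permitted before but are not in
 * the new bitmap get their deny bits rewritten: */
memcpy(tss->io_bitmap.bitmap, iobm->bitmap,
       max(tss->io_bitmap.prev_max, iobm->max));
tss->io_bitmap.prev_max = iobm->max;
tss->io_bitmap.prev_sequence = iobm->sequence;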
468 st->local_state = 0; in speculative_store_bypass_ht_init()
471 * Shared state setup happens once on the first bringup in speculative_store_bypass_ht_init()
472 * of the CPU. It's not destroyed on CPU hotunplug. in speculative_store_bypass_ht_init()
474 if (st->shared_state) in speculative_store_bypass_ht_init()
477 raw_spin_lock_init(&st->lock); in speculative_store_bypass_ht_init()
491 st->shared_state = per_cpu(ssb_state, cpu).shared_state; in speculative_store_bypass_ht_init()
496 * First HT sibling to come up on the core. Link shared state of in speculative_store_bypass_ht_init()
497 * the first HT sibling to itself. The siblings on the same core in speculative_store_bypass_ht_init()
501 st->shared_state = st; in speculative_store_bypass_ht_init()
524 * Since this can race with prctl(), block reentry on the in amd_set_core_ssb_state()
527 if (__test_and_set_bit(LSTATE_SSB, &st->local_state)) in amd_set_core_ssb_state()
532 raw_spin_lock(&st->shared_state->lock); in amd_set_core_ssb_state()
534 if (!st->shared_state->disable_state) in amd_set_core_ssb_state()
536 st->shared_state->disable_state++; in amd_set_core_ssb_state()
537 raw_spin_unlock(&st->shared_state->lock); in amd_set_core_ssb_state()
539 if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state)) in amd_set_core_ssb_state()
542 raw_spin_lock(&st->shared_state->lock); in amd_set_core_ssb_state()
543 st->shared_state->disable_state--; in amd_set_core_ssb_state()
544 if (!st->shared_state->disable_state) in amd_set_core_ssb_state()
546 raw_spin_unlock(&st->shared_state->lock); in amd_set_core_ssb_state()
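The amd_set_core_ssb_state() fragments above implement a per-physical-core refcount: each HT sibling that needs speculative store bypass disabled takes the shared lock and bumps disable_state, and the controlling MSR is only rewritten on the 0 -> 1 and 1 -> 0 transitions; LSTATE_SSB in local_state keeps one sibling from being counted twice. A hedged reconstruction of that shape (the wrmsrl() arguments are placeholders; the real code builds the value from the AMD LS_CFG base and the SSBD mask):

if (tifn & _TIF_SSBD) {
        if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
                return;                                    /* already counted */
        raw_spin_lock(&st->shared_state->lock);
        if (!st->shared_state->disable_state)
                wrmsrl(MSR_AMD64_LS_CFG, msr | ssbd_mask); /* first sibling */
        st->shared_state->disable_state++;
        raw_spin_unlock(&st->shared_state->lock);
} else {
        if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
                return;
        raw_spin_lock(&st->shared_state->lock);
        st->shared_state->disable_state--;
        if (!st->shared_state->disable_state)
                wrmsrl(MSR_AMD64_LS_CFG, msr);             /* last sibling */
        raw_spin_unlock(&st->shared_state->lock);
}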
582 /* Handle change of TIF_SSBD depending on the mitigation method. */ in __speculation_ctrl_update()
692 * Idle related variables and functions
718 * Called from the generic idle code.
726 * We use this if we don't have any better idle routine..
758 * Use wbinvd on processors that support SME. This provides support in stop_this_cpu()
760 * to SME active (or vice-versa). The cache must be cleared so that in stop_this_cpu()
782 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
817 * Do not prefer MWAIT if MONITOR instruction has a bug or idle=nomwait
828 /* MWAIT is not supported on this platform. Fallback to HALT */ in prefer_mwait_c1_over_halt()
853 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
854 * with interrupts enabled and no flags, which is backwards compatible with the
862 clflush((void *)&current_thread_info()->flags); in mwait_idle()
866 __monitor((void *)&current_thread_info()->flags, 0, 0); in mwait_idle()
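mwait_idle() (lines 853-866) arms MONITOR on the thread flags word and then issues MWAIT with no hints, i.e. plain C1: any writer that sets TIF_NEED_RESCHED touches the monitored cache line and wakes the CPU, so no reschedule IPI is needed while the CPU advertises polling. A hedged outline of the loop follows; the helper names come from the generic idle/mwait headers, the exact control flow is an assumption, and the clflush workaround shown at line 862 (for CPUs with the MONITOR erratum) is omitted:

static __cpuidle void mwait_idle(void)
{
        if (!current_set_polling_and_test()) {
                /* Arm the monitor on this task's flags word ... */
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                /* ... and only sleep if nothing needs the CPU. */
                if (!need_resched())
                        __sti_mwait(0, 0);      /* MWAIT, no hints: C1 */
                else
                        raw_local_irq_enable();
        } else {
                raw_local_irq_enable();
        }
        __current_clr_polling();
}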
881 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n"); in select_idle_routine()
887 pr_info("using AMD E400 aware idle routine\n"); in select_idle_routine()
890 pr_info("using mwait in idle threads\n"); in select_idle_routine()
893 pr_info("using TDX aware idle routine\n"); in select_idle_routine()
902 pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id()); in amd_e400_c1e_apic_setup()
935 return -EINVAL; in idle_setup()
938 pr_info("using polling idle threads\n"); in idle_setup()
943 * When the boot option of idle=halt is added, halt is in idle_setup()
944 * forced to be used for CPU idle. In such case CPU C2/C3 in idle_setup()
946 * To continue to load the CPU idle driver, don't touch in idle_setup()
953 * If the boot option of "idle=nomwait" is added, in idle_setup()
959 return -1; in idle_setup()
963 early_param("idle", idle_setup);
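idle_setup() is registered with early_param(), so the string it parses is whatever follows "idle=" on the kernel command line; "poll", "halt" and "nomwait" select the behaviours the comments above describe. A hedged sketch of the handler shape, reconstructed around the fragments in this listing (the override constants are the ones declared in <asm/processor.h>):

static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                pr_info("using polling idle threads\n");
                boot_option_idle_override = IDLE_POLL;
                cpu_idle_poll_ctrl(true);
        } else if (!strcmp(str, "halt")) {
                /* Force HLT; keep the cpuidle driver loadable for C2/C3. */
                boot_option_idle_override = IDLE_HALT;
        } else if (!strcmp(str, "nomwait")) {
                /* Keep the default routine but never prefer MWAIT for C1. */
                boot_option_idle_override = IDLE_NOMWAIT;
        } else {
                return -1;
        }

        return 0;
}
early_param("idle", idle_setup);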
967 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) in arch_align_stack()
968 sp -= prandom_u32_max(8192); in arch_align_stack()
974 return randomize_page(mm->brk, 0x02000000); in arch_randomize_brk()
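Both randomization helpers above operate in small, easy-to-verify ranges: arch_align_stack() subtracts up to 8 KiB (8192 bytes) from the starting stack pointer (and then 16-byte aligns it in the part of the function this listing omits), while arch_randomize_brk() picks a page-aligned offset within 32 MiB (0x02000000) above the current brk. A tiny user-space demonstration of the same arithmetic (illustrative only; all names are local to the example):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        unsigned long sp  = 0x7ffffffff000UL;   /* pretend initial stack pointer */
        unsigned long brk = 0x555555560000UL;   /* pretend program break */

        /* Stack: subtract a random amount below 8 KiB, then align to 16 bytes. */
        unsigned long rand_sp = (sp - (rand() % 8192)) & ~0xfUL;

        /* Brk: add a random page-aligned offset below 32 MiB. */
        unsigned long rand_brk = brk +
                ((unsigned long)(rand() % (0x02000000 >> 12)) << 12);

        printf("sp  %#lx -> %#lx\n", sp, rand_sp);
        printf("brk %#lx -> %#lx\n", brk, rand_brk);
        return 0;
}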
978 * Called from fs/proc with a reference on @p to find the function
1021 return -EINVAL; in do_arch_prctl_common()