// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c)
 */
#include "sched.h"

#include <trace/events/power.h>

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

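/*
 * cpu_idle_poll_ctrl - force or stop forcing the idle loop to poll.
 *
 * Calls nest: each enable increments cpu_idle_force_poll, each disable
 * decrements it. While the count is non-zero the idle loop spins in
 * cpu_idle_poll() instead of entering a cpuidle state.
 */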
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

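/*
 * cpu_idle_poll - polling idle loop: spin with interrupts enabled until a
 * reschedule is needed or polling is no longer required.
 */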
static noinline int __cpuidle cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	stop_critical_timings();

	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();

	start_critical_timings();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {
		stop_critical_timings();
		arch_cpu_idle();
		start_critical_timings();
	}
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be scheduled: there is no point in entering an
	 * idle state, so record a zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called with
 * polling set, and it returns with polling set. If it ever stops polling,
 * it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * The RCU framework needs to be told that we are entering an idle
	 * section, so no more RCU read-side critical sections and one more
	 * step towards the grace period.
	 */

	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();
		rcu_idle_enter();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->use_deepest_state) {
		if (idle_should_enter_s2idle()) {
			rcu_idle_enter();

			entered_state = cpuidle_enter_s2idle(drv, dev);
			if (entered_state > 0) {
				local_irq_enable();
				goto exit_idle;
			}

			rcu_idle_exit();
		}

		tick_nohz_idle_stop_tick();
		rcu_idle_enter();

		next_state = cpuidle_find_deepest_state(drv, dev);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		rcu_idle_enter();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to reenable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();
	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		rmb();

		local_irq_disable();

		if (cpu_is_offline(cpu)) {
			tick_nohz_idle_stop_tick();
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		arch_cpu_idle_enter();

		/*
		 * In poll mode we reenable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	sched_ttwu_pending();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

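/*
 * cpu_in_idle - check whether an instruction pointer lies in the
 * __cpuidle text section, i.e. whether the CPU was executing low-level
 * idle code when it was sampled (e.g. by an NMI backtrace).
 */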
bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}

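/*
 * Per-invocation timer used by play_idle() below: the hrtimer callback
 * sets @done and requests a reschedule so the injected idle period ends.
 */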
struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

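/**
 * play_idle - inject idle time on the current CPU.
 * @duration_us: requested idle duration in microseconds.
 *
 * Runs the idle loop with the deepest available cpuidle state until the
 * on-stack hrtimer fires. Meant to be called from a per-CPU SCHED_FIFO
 * kthread, as the checks below enforce.
 */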
void play_idle(unsigned long duration_us)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_us);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(true);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ns_to_ktime(duration_us * NSEC_PER_USEC),
		      HRTIMER_MODE_REL_PINNED);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(false);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle);

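/*
 * cpu_startup_entry - final entry point for a booting or newly onlined CPU.
 *
 * Never returns: after the arch and hotplug callbacks it loops in
 * do_idle() forever.
 */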
void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}

static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return WARN_ON_ONCE(1);
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next)
{
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
}

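/*
 * Picking the idle task never fails: rq->idle is always available to run.
 */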
static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	struct task_struct *next = rq->idle;

	if (prev)
		put_prev_task(rq, prev);

	set_next_task_idle(rq, next);

	return next;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

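/*
 * Tasks can never be switched into, or change priority within, the idle
 * class, so these notifiers must be unreachable.
 */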
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task		= set_next_task_idle,

#ifdef CONFIG_SMP
	.balance		= balance_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};