/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c)
 */
#include "sched.h"

#include <trace/events/power.h>

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}
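
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * temporarily needs minimal wakeup latency can force the idle loop into
 * polling mode around a critical window by pairing the calls; the
 * my_dev_run_latency_critical_work() helper below is a made-up placeholder:
 *
 *	cpu_idle_poll_ctrl(true);
 *	my_dev_run_latency_critical_work(dev);
 *	cpu_idle_poll_ctrl(false);
 *
 * The calls nest, so every enable must eventually be balanced by exactly
 * one disable.
 */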

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif
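
/*
 * Usage note (illustrative, not from this file): on architectures that
 * select CONFIG_GENERIC_IDLE_POLL_SETUP, the setup handlers above map the
 * choice onto the kernel command line, e.g. appending
 *
 *	nohlt		force the polling idle loop (cpu_idle_force_poll = 1)
 *	hlt		allow the halting idle routines (cpu_idle_force_poll = 0)
 *
 * to the boot parameters selects between the two at boot time.
 */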

static noinline int __cpuidle cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	stop_critical_timings();

	while (!tif_need_resched() &&
		(cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	start_critical_timings();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}
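
/*
 * Illustrative sketch (an assumption, not any particular architecture's
 * real code): an arch that overrides the weak arch_cpu_idle() above is
 * expected to re-enable interrupts and enter a low-power wait so that a
 * wakeup interrupt can bring the CPU back, conceptually:
 *
 *	void arch_cpu_idle(void)
 *	{
 *		local_irq_enable();
 *		wait_for_interrupt();
 *	}
 *
 * where wait_for_interrupt() stands in for the architecture's idle
 * instruction. The weak default has no such instruction to use, so it
 * falls back to forcing the polling loop instead.
 */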

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {
		stop_critical_timings();
		arch_cpu_idle();
		start_critical_timings();
	}
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be scheduled, so it is pointless to go idle;
	 * just record a zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called with
 * polling set, and it returns with polling set. If it ever stops polling,
 * it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * The RCU framework needs to be told that we are entering an idle
	 * section, so no more RCU read-side critical sections and one more
	 * step towards the grace period.
	 */

	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();
		rcu_idle_enter();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->use_deepest_state) {
		if (idle_should_enter_s2idle()) {
			rcu_idle_enter();

			entered_state = cpuidle_enter_s2idle(drv, dev);
			if (entered_state > 0) {
				local_irq_enable();
				goto exit_idle;
			}

			rcu_idle_exit();
		}

		tick_nohz_idle_stop_tick();
		rcu_idle_enter();

		next_state = cpuidle_find_deepest_state(drv, dev);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		rcu_idle_enter();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();
	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */
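
	/*
	 * Illustrative sketch of the waker's side of that contract (a
	 * paraphrase of the remote wakeup logic in kernel/sched/core.c, not
	 * a verbatim copy): a CPU queueing a wakeup first tries to set
	 * TIF_NEED_RESCHED on a polling idle task and only sends the
	 * reschedule IPI if that fails:
	 *
	 *	if (set_nr_if_polling(rq->idle))
	 *		trace_sched_wake_idle_without_ipi(cpu);
	 *	else
	 *		smp_send_reschedule(cpu);
	 *
	 * The invariant above is what makes skipping the IPI safe.
	 */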

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		check_pgt_cache();
		rmb();

		if (cpu_is_offline(cpu)) {
			tick_nohz_idle_stop_tick_protected();
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		local_irq_disable();
		arch_cpu_idle_enter();

		/*
		 * In poll mode we reenable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	sched_ttwu_pending();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}

struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

void play_idle(unsigned long duration_ms)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ms);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(true);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(false);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle);
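
/*
 * Illustrative usage sketch (an assumption about a typical caller, not
 * code from this file): an idle-injection kthread, pinned to one CPU and
 * running as SCHED_FIFO so that it satisfies the WARN_ON_ONCE() checks in
 * play_idle(), could inject forced idle in fixed slices from its thread
 * function. idle_injection_should_run() and IDLE_SLICE_MS are made-up
 * placeholders:
 *
 *	static int idle_injection_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (idle_injection_should_run())
 *				play_idle(IDLE_SLICE_MS);
 *			else
 *				schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */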

void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (ARM and SH have never invoked the canary
	 * init for the non-boot CPUs!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	put_prev_task(rq, prev);
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);

	return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_idle(struct rq *rq)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};