Lines Matching +full:local +full:- +full:timers

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
4 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
5 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
7 * No idle tick implementation for low and high resolution timers
26 #include <linux/posix-timers.h>
32 #include "tick-internal.h"
37 * Per-CPU nohz control structure
205 if (ts->last_tick_jiffies != jiffies) { in tick_sched_do_timer()
206 ts->stalled_jiffies = 0; in tick_sched_do_timer()
207 ts->last_tick_jiffies = READ_ONCE(jiffies); in tick_sched_do_timer()
209 if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) { in tick_sched_do_timer()
211 ts->stalled_jiffies = 0; in tick_sched_do_timer()
212 ts->last_tick_jiffies = READ_ONCE(jiffies); in tick_sched_do_timer()
216 if (ts->inidle) in tick_sched_do_timer()
217 ts->got_idle_tick = 1; in tick_sched_do_timer()
231 if (ts->tick_stopped) { in tick_sched_handle()
234 ts->idle_jiffies++; in tick_sched_handle()
240 ts->next_tick = 0; in tick_sched_handle()
297 if (check_tick_dependency(&ts->tick_dep_mask)) in can_stop_full_tick()
300 if (check_tick_dependency(&current->tick_dep_mask)) in can_stop_full_tick()
303 if (check_tick_dependency(&current->signal->tick_dep_mask)) in can_stop_full_tick()
319 * re-evaluate its dependency on the tick and restart it if necessary.
333 * re-evaluate its dependency on the tick and restart it if necessary.
351 * activate_task() STORE p->tick_dep_mask in tick_nohz_kick_task()
352 * STORE p->on_rq in tick_nohz_kick_task()
354 * LOCK rq->lock LOAD p->on_rq in tick_nohz_kick_task()
357 * LOAD p->tick_dep_mask in tick_nohz_kick_task()
368 * STORE p->cpu = @cpu in tick_nohz_kick_task()
370 * LOCK rq->lock in tick_nohz_kick_task()
371 * smp_mb__after_spin_lock() STORE p->tick_dep_mask in tick_nohz_kick_task()
373 * LOAD p->tick_dep_mask LOAD p->cpu in tick_nohz_kick_task()
384 * Kick all full dynticks CPUs in order to force these to re-evaluate
425 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
435 prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask); in tick_nohz_dep_set_cpu()
438 /* Perf needs local kick that is NMI safe */ in tick_nohz_dep_set_cpu()
442 /* Remote irq work not NMI-safe */ in tick_nohz_dep_set_cpu()
455 atomic_andnot(BIT(bit), &ts->tick_dep_mask); in tick_nohz_dep_clear_cpu()
460  * Set a per-task tick dependency. RCU needs this, and so do posix CPU timers
461  * in order to elapse per-task timers.
465 if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask)) in tick_nohz_dep_set_task()
472 atomic_andnot(BIT(bit), &tsk->tick_dep_mask); in tick_nohz_dep_clear_task()
477 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
478 * per process timers.
484 struct signal_struct *sig = tsk->signal; in tick_nohz_dep_set_signal()
486 prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask); in tick_nohz_dep_set_signal()
490 lockdep_assert_held(&tsk->sighand->siglock); in tick_nohz_dep_set_signal()
498 atomic_andnot(BIT(bit), &sig->tick_dep_mask); in tick_nohz_dep_clear_signal()
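The per-CPU, per-task and per-signal setters above are normally reached through thin wrappers in <linux/tick.h> that first check whether nohz_full is enabled at all. A minimal sketch of a caller, assuming the tick_dep_set_cpu()/tick_dep_set_task() wrappers and the TICK_DEP_BIT_* constants from that header (none of which appear in this listing); the example_* functions are made up for illustration:

#include <linux/tick.h>
#include <linux/sched.h>

/* Keep the tick alive on @cpu while perf events need it. */
static void example_perf_needs_tick(int cpu)
{
        tick_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
}

/* Keep the tick alive wherever @tsk runs while it has armed CPU timers. */
static void example_task_timer_armed(struct task_struct *tsk)
{
        tick_dep_set_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

/* Drop the dependencies once the work that needed the tick is gone. */
static void example_deps_gone(int cpu, struct task_struct *tsk)
{
        tick_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
        tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}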
502 * Re-evaluate the need for the tick as we switch the current task.
504 * perf events, posix CPU timers, ...
515 if (ts->tick_stopped) { in __tick_nohz_task_switch()
516 if (atomic_read(&current->tick_dep_mask) || in __tick_nohz_task_switch()
517 atomic_read(&current->signal->tick_dep_mask)) in __tick_nohz_task_switch()
522 /* Get the boot-time nohz CPU list from the kernel parameters. */
534 * timers, workqueues, timekeeping, ...) on behalf of full dynticks in tick_nohz_cpu_down()
538 return -EBUSY; in tick_nohz_cpu_down()
555 pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n"); in tick_nohz_init()
585 * NOHZ - aka dynamic tick functionality
607 return ts->tick_stopped; in tick_nohz_tick_stopped()
614 return ts->tick_stopped; in tick_nohz_tick_stopped_cpu()
618 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
641 * Updates the per-CPU time idle statistics counters
648 if (ts->idle_active) { in update_ts_time_stats()
649 delta = ktime_sub(now, ts->idle_entrytime); in update_ts_time_stats()
651 ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); in update_ts_time_stats()
653 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); in update_ts_time_stats()
654 ts->idle_entrytime = now; in update_ts_time_stats()
665 ts->idle_active = 0; in tick_nohz_stop_idle()
672 ts->idle_entrytime = ktime_get(); in tick_nohz_start_idle()
673 ts->idle_active = 1; in tick_nohz_start_idle()
678 * get_cpu_idle_time_us - get the total idle time of a CPU
689 * This function returns -1 if NOHZ is not enabled.
697 return -1; in get_cpu_idle_time_us()
702 idle = ts->idle_sleeptime; in get_cpu_idle_time_us()
704 if (ts->idle_active && !nr_iowait_cpu(cpu)) { in get_cpu_idle_time_us()
705 ktime_t delta = ktime_sub(now, ts->idle_entrytime); in get_cpu_idle_time_us()
707 idle = ktime_add(ts->idle_sleeptime, delta); in get_cpu_idle_time_us()
709 idle = ts->idle_sleeptime; in get_cpu_idle_time_us()
719 * get_cpu_iowait_time_us - get the total iowait time of a CPU
730 * This function returns -1 if NOHZ is not enabled.
738 return -1; in get_cpu_iowait_time_us()
743 iowait = ts->iowait_sleeptime; in get_cpu_iowait_time_us()
745 if (ts->idle_active && nr_iowait_cpu(cpu) > 0) { in get_cpu_iowait_time_us()
746 ktime_t delta = ktime_sub(now, ts->idle_entrytime); in get_cpu_iowait_time_us()
748 iowait = ktime_add(ts->iowait_sleeptime, delta); in get_cpu_iowait_time_us()
750 iowait = ts->iowait_sleeptime; in get_cpu_iowait_time_us()
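A minimal sketch of how a governor-style caller might read these per-CPU counters; both accessors are declared in <linux/tick.h>, while example_report_cpu_sleep() and its message are made up for illustration:

#include <linux/tick.h>
#include <linux/printk.h>

static void example_report_cpu_sleep(int cpu)
{
        u64 now_us;
        u64 idle_us   = get_cpu_idle_time_us(cpu, &now_us);
        u64 iowait_us = get_cpu_iowait_time_us(cpu, NULL);

        /* Both helpers return -1 when NOHZ idle accounting is unavailable. */
        if (idle_us == (u64)-1 || iowait_us == (u64)-1) {
                pr_info("CPU%d: NOHZ idle accounting not enabled\n", cpu);
                return;
        }

        pr_info("CPU%d: idle %llu us, iowait %llu us (sampled at %llu us)\n",
                cpu, idle_us, iowait_us, now_us);
}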
760 hrtimer_cancel(&ts->sched_timer); in tick_nohz_restart()
761 hrtimer_set_expires(&ts->sched_timer, ts->last_tick); in tick_nohz_restart()
764 hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); in tick_nohz_restart()
766 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { in tick_nohz_restart()
767 hrtimer_start_expires(&ts->sched_timer, in tick_nohz_restart()
770 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_nohz_restart()
777 ts->next_tick = 0; in tick_nohz_restart()
797 ts->last_jiffies = basejiff; in tick_nohz_next_event()
798 ts->timer_expires_base = basemono; in tick_nohz_next_event()
803 * Aside of that check whether the local timer softirq is in tick_nohz_next_event()
816 * timers are enabled this only takes the timer wheel in tick_nohz_next_event()
817 * timers into account. If high resolution timers are in tick_nohz_next_event()
822 ts->next_timer = next_tick; in tick_nohz_next_event()
829 delta = next_tick - basemono; in tick_nohz_next_event()
840 if (!ts->tick_stopped) { in tick_nohz_next_event()
841 ts->timer_expires = 0; in tick_nohz_next_event()
853 (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last)) in tick_nohz_next_event()
857 if (delta < (KTIME_MAX - basemono)) in tick_nohz_next_event()
862 ts->timer_expires = min_t(u64, expires, next_tick); in tick_nohz_next_event()
865 return ts->timer_expires; in tick_nohz_next_event()
871 u64 basemono = ts->timer_expires_base; in tick_nohz_stop_tick()
872 u64 expires = ts->timer_expires; in tick_nohz_stop_tick()
876 ts->timer_expires_base = 0; in tick_nohz_stop_tick()
888 ts->do_timer_last = 1; in tick_nohz_stop_tick()
890 ts->do_timer_last = 0; in tick_nohz_stop_tick()
894 if (ts->tick_stopped && (expires == ts->next_tick)) { in tick_nohz_stop_tick()
896 if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) in tick_nohz_stop_tick()
900 …printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->exp… in tick_nohz_stop_tick()
901 basemono, ts->next_tick, dev->next_event, in tick_nohz_stop_tick()
902 hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer)); in tick_nohz_stop_tick()
912 if (!ts->tick_stopped) { in tick_nohz_stop_tick()
916 ts->last_tick = hrtimer_get_expires(&ts->sched_timer); in tick_nohz_stop_tick()
917 ts->tick_stopped = 1; in tick_nohz_stop_tick()
921 ts->next_tick = tick; in tick_nohz_stop_tick()
928 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) in tick_nohz_stop_tick()
929 hrtimer_cancel(&ts->sched_timer); in tick_nohz_stop_tick()
935 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { in tick_nohz_stop_tick()
936 hrtimer_start(&ts->sched_timer, tick, in tick_nohz_stop_tick()
939 hrtimer_set_expires(&ts->sched_timer, tick); in tick_nohz_stop_tick()
946 ts->timer_expires_base = 0; in tick_nohz_retain_tick()
975 ts->tick_stopped = 0; in tick_nohz_restart_sched_tick()
987 else if (ts->tick_stopped) in __tick_nohz_full_update_tick()
997 if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE) in tick_nohz_full_update_tick()
1035 pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n", in report_idle_softirq()
1058 ts->next_tick = 0; in can_stop_idle_tick()
1062 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) in can_stop_idle_tick()
1079 /* Should not happen for nohz-full */ in can_stop_idle_tick()
1096 if (ts->timer_expires_base) in __tick_nohz_idle_stop_tick()
1097 expires = ts->timer_expires; in __tick_nohz_idle_stop_tick()
1103 ts->idle_calls++; in __tick_nohz_idle_stop_tick()
1106 int was_stopped = ts->tick_stopped; in __tick_nohz_idle_stop_tick()
1110 ts->idle_sleeps++; in __tick_nohz_idle_stop_tick()
1111 ts->idle_expires = expires; in __tick_nohz_idle_stop_tick()
1113 if (!was_stopped && ts->tick_stopped) { in __tick_nohz_idle_stop_tick()
1114 ts->idle_jiffies = ts->last_jiffies; in __tick_nohz_idle_stop_tick()
1123 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
1143 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
1157 WARN_ON_ONCE(ts->timer_expires_base); in tick_nohz_idle_enter()
1159 ts->inidle = 1; in tick_nohz_idle_enter()
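The enter/stop/exit helpers above are meant to bracket the low-level idle call. A hedged sketch of the expected ordering, loosely modeled on the generic idle loop; the governor decision and the polling fallback are omitted, and example_idle_iteration() is made up:

#include <linux/tick.h>

static void example_idle_iteration(void)
{
        tick_nohz_idle_enter();         /* mark this CPU as entering idle */

        /*
         * A cpuidle governor would decide here whether stopping the tick
         * pays off; if it does, the tick is stopped before sleeping.
         */
        tick_nohz_idle_stop_tick();

        /* ... the CPU enters a low power state via arch/cpuidle code ... */

        tick_nohz_idle_exit();          /* restart the tick, fold idle time */
}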
1166 * tick_nohz_irq_exit - update next tick event from interrupt exit
1171 * So we need to re-calculate and reprogram the next tick event.
1177 if (ts->inidle) in tick_nohz_irq_exit()
1184 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
1190 if (ts->got_idle_tick) { in tick_nohz_idle_got_tick()
1191 ts->got_idle_tick = 0; in tick_nohz_idle_got_tick()
1198 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
1206 return __this_cpu_read(tick_cpu_device.evtdev)->next_event; in tick_nohz_get_next_hrtimer()
1210 * tick_nohz_get_sleep_length - return the expected length of the current sleep
1228 ktime_t now = ts->idle_entrytime; in tick_nohz_get_sleep_length()
1231 WARN_ON_ONCE(!ts->inidle); in tick_nohz_get_sleep_length()
1233 *delta_next = ktime_sub(dev->next_event, now); in tick_nohz_get_sleep_length()
1247 hrtimer_next_event_without(&ts->sched_timer)); in tick_nohz_get_sleep_length()
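A short sketch of how a cpuidle-governor-style caller might consume this; tick_nohz_get_sleep_length() is declared in <linux/tick.h>, and example_predicted_idle_ns() is made up for illustration:

#include <linux/tick.h>
#include <linux/ktime.h>

static s64 example_predicted_idle_ns(void)
{
        ktime_t delta_tick;     /* sleep length if the tick keeps running */
        ktime_t delta = tick_nohz_get_sleep_length(&delta_tick);

        /*
         * A governor can compare the two values to judge whether a deep
         * idle state (and stopping the tick) is worth the exit latency.
         */
        return ktime_to_ns(delta);
}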
1253 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
1262 return ts->idle_calls; in tick_nohz_get_idle_calls_cpu()
1266 * tick_nohz_get_idle_calls - return the current idle calls counter value
1274 return ts->idle_calls; in tick_nohz_get_idle_calls()
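A hedged sketch of using the idle-calls counter to notice whether a CPU has entered idle since the last sample, similar in spirit to what a cpufreq governor might do; the static bookkeeping and example names are illustrative only:

#include <linux/tick.h>

static unsigned long example_last_idle_calls;

static bool example_cpu_entered_idle(int cpu)
{
        unsigned long calls = tick_nohz_get_idle_calls_cpu(cpu);
        bool entered = (calls != example_last_idle_calls);

        example_last_idle_calls = calls;
        return entered;
}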
1282 ts->idle_exittime = now; in tick_nohz_account_idle_time()
1291 ticks = jiffies - ts->idle_jiffies; in tick_nohz_account_idle_time()
1303 if (ts->tick_stopped) { in tick_nohz_idle_restart_tick()
1321 * tick_nohz_idle_exit - restart the idle tick from the idle task
1335 WARN_ON_ONCE(!ts->inidle); in tick_nohz_idle_exit()
1336 WARN_ON_ONCE(ts->timer_expires_base); in tick_nohz_idle_exit()
1338 ts->inidle = 0; in tick_nohz_idle_exit()
1339 idle_active = ts->idle_active; in tick_nohz_idle_exit()
1340 tick_stopped = ts->tick_stopped; in tick_nohz_idle_exit()
1363 dev->next_event = KTIME_MAX; in tick_nohz_handler()
1368 if (unlikely(ts->tick_stopped)) { in tick_nohz_handler()
1378 hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); in tick_nohz_handler()
1379 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_nohz_handler()
1386 ts->nohz_mode = mode; in tick_nohz_activate()
1393 * tick_nohz_switch_to_nohz - switch to nohz mode
1410 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); in tick_nohz_switch_to_nohz()
1414 hrtimer_set_expires(&ts->sched_timer, next); in tick_nohz_switch_to_nohz()
1415 hrtimer_forward_now(&ts->sched_timer, TICK_NSEC); in tick_nohz_switch_to_nohz()
1416 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_nohz_switch_to_nohz()
1425 if (!ts->idle_active && !ts->tick_stopped) in tick_nohz_irq_enter()
1428 if (ts->idle_active) in tick_nohz_irq_enter()
1437 if (ts->tick_stopped) in tick_nohz_irq_enter()
1482 ts->next_tick = 0; in tick_sched_timer()
1485 if (unlikely(ts->tick_stopped)) in tick_sched_timer()
1504 * tick_setup_sched_timer - setup the tick emulation timer
1512 * Emulate tick processing via per-CPU hrtimers: in tick_setup_sched_timer()
1514 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); in tick_setup_sched_timer()
1515 ts->sched_timer.function = tick_sched_timer; in tick_setup_sched_timer()
1517 /* Get the next period (per-CPU) */ in tick_setup_sched_timer()
1518 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); in tick_setup_sched_timer()
1525 hrtimer_add_expires_ns(&ts->sched_timer, offset); in tick_setup_sched_timer()
1528 hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); in tick_setup_sched_timer()
1529 hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); in tick_setup_sched_timer()
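For context, a self-contained sketch of the self-rearming per-CPU hrtimer pattern this setup code implements; the example_* names, the ns_to_ktime() conversion and the NSEC_PER_SEC / HZ period are illustrative, not taken from this file:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_tick;
static ktime_t example_period;

static enum hrtimer_restart example_tick_fn(struct hrtimer *timer)
{
        /* Periodic work would run here, then the expiry moves one period on. */
        hrtimer_forward_now(timer, example_period);
        return HRTIMER_RESTART;
}

static void example_tick_setup(void)
{
        example_period = ns_to_ktime(NSEC_PER_SEC / HZ);

        hrtimer_init(&example_tick, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
        example_tick.function = example_tick_fn;
        hrtimer_start(&example_tick, ktime_add(ktime_get(), example_period),
                      HRTIMER_MODE_ABS_PINNED_HARD);
}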
1540 if (ts->sched_timer.base) in tick_cancel_sched_timer()
1541 hrtimer_cancel(&ts->sched_timer); in tick_cancel_sched_timer()
1566 set_bit(0, &ts->check_clocks); in tick_oneshot_notify()
1573 * softirq) allow_nohz signals, that we can switch into low-res nohz
1574 * mode, because high resolution timers are disabled (either compile
1581 if (!test_and_clear_bit(0, &ts->check_clocks)) in tick_check_oneshot_change()
1584 if (ts->nohz_mode != NOHZ_MODE_INACTIVE) in tick_check_oneshot_change()