Lines Matching +full:local +full:- +full:timers

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
4 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
5 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
7 * High-resolution kernel timers
9 * In contrast to the low-resolution timeout API, aka timer wheel,
49 #include "tick-internal.h"
52 * Masks for selecting the soft and hard context timers from
53 * cpu_base->active
56 #define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
118 [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
135 * timer->base->cpu_base
154 * means that all timers which are tied to this base via timer->base are
157 * So __run_timers/migrate_timers can safely modify all timers which could
161 * possible to set timer->base = &migration_base and drop the lock: the timer
171 base = READ_ONCE(timer->base); in lock_hrtimer_base()
173 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
174 if (likely(base == timer->base)) in lock_hrtimer_base()
177 raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
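The lines above come from the retry loop in lock_hrtimer_base(): the base pointer is sampled, the per-CPU base lock is taken, and the pointer is re-checked under the lock in case the timer migrated in between. Roughly paraphrased as a sketch (not a verbatim copy of the function):

	for (;;) {
		base = READ_ONCE(timer->base);
		if (likely(base != &migration_base)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;		/* base is stable under the lock */
			/* The timer migrated while the lock was being taken: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}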
190 * Called with cpu_base->lock of target cpu held.
197 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset); in hrtimer_check_target()
198 return expires < new_base->cpu_base->expires_next; in hrtimer_check_target()
213 * We switch the timer base to a power-optimized selected CPU target,
215 * - NO_HZ_COMMON is enabled
216 * - timer migration is enabled
217 * - the timer callback is not running
218 * - the timer is not the first expiring timer on the new target
230 int basenum = base->index; in switch_hrtimer_base()
235 new_base = &new_cpu_base->clock_base[basenum]; in switch_hrtimer_base()
251 WRITE_ONCE(timer->base, &migration_base); in switch_hrtimer_base()
252 raw_spin_unlock(&base->cpu_base->lock); in switch_hrtimer_base()
253 raw_spin_lock(&new_base->cpu_base->lock); in switch_hrtimer_base()
257 raw_spin_unlock(&new_base->cpu_base->lock); in switch_hrtimer_base()
258 raw_spin_lock(&base->cpu_base->lock); in switch_hrtimer_base()
260 WRITE_ONCE(timer->base, base); in switch_hrtimer_base()
263 WRITE_ONCE(timer->base, new_base); in switch_hrtimer_base()
284 struct hrtimer_clock_base *base = timer->base; in lock_hrtimer_base()
286 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
310 tmp = dclc < 0 ? -dclc : dclc; in __ktime_divns()
319 return dclc < 0 ? -tmp : tmp; in __ktime_divns()
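The two matched lines show the sign handling in __ktime_divns(): the division runs on the magnitude and the sign is restored afterwards. A small illustrative sketch with assumed values:

	#include <linux/types.h>
	#include <asm/div64.h>			/* do_div() */

	static s64 divns_example(void)
	{
		s64 dclc = -1500000;			/* -1.5 ms in ns (assumed input) */
		u64 tmp  = dclc < 0 ? -dclc : dclc;	/* work on the magnitude: 1500000 */

		do_div(tmp, 1000);			/* tmp = 1500 */
		return dclc < 0 ? -tmp : tmp;		/* restore the sign: -1500 */
	}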
349 return ((struct hrtimer *) addr)->function; in hrtimer_debug_hint()
354 * - an active object is initialized
372 * - an active object is activated
373 * - an unknown non-static object is activated
388 * - an active object is freed
445 debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr); in hrtimer_init_sleeper_on_stack()
496 return &cpu_base->clock_base[idx]; in __next_base()
514 next = timerqueue_getnext(&base->active); in __hrtimer_next_event_base()
524 expires = ktime_sub(hrtimer_get_expires(timer), base->offset); in __hrtimer_next_event_base()
532 if (timer->is_soft) in __hrtimer_next_event_base()
533 cpu_base->softirq_next_timer = timer; in __hrtimer_next_event_base()
535 cpu_base->next_timer = timer; in __hrtimer_next_event_base()
539 * clock_was_set() might have changed base->offset of any of in __hrtimer_next_event_base()
556 * those timers will get run whenever the softirq gets handled; at the end of
557 * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
564 * - HRTIMER_ACTIVE_ALL,
565 * - HRTIMER_ACTIVE_SOFT, or
566 * - HRTIMER_ACTIVE_HARD.
575 if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) { in __hrtimer_get_next_event()
576 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT; in __hrtimer_get_next_event()
577 cpu_base->softirq_next_timer = NULL; in __hrtimer_get_next_event()
581 next_timer = cpu_base->softirq_next_timer; in __hrtimer_get_next_event()
585 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD; in __hrtimer_get_next_event()
586 cpu_base->next_timer = next_timer; in __hrtimer_get_next_event()
603 if (!cpu_base->softirq_activated) { in hrtimer_update_next_event()
609 cpu_base->softirq_expires_next = soft; in hrtimer_update_next_event()
614 * If a softirq timer is expiring first, update cpu_base->next_timer in hrtimer_update_next_event()
618 cpu_base->next_timer = cpu_base->softirq_next_timer; in hrtimer_update_next_event()
627 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; in hrtimer_update_base()
628 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset; in hrtimer_update_base()
629 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset; in hrtimer_update_base()
631 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq, in hrtimer_update_base()
634 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real; in hrtimer_update_base()
635 base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot; in hrtimer_update_base()
636 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai; in hrtimer_update_base()
647 cpu_base->hres_active : 0; in __hrtimer_hres_active()
659 cpu_base->expires_next = expires_next; in __hrtimer_reprogram()
675 * set. So we'd effectively block all timers until the T2 event in __hrtimer_reprogram()
678 if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected) in __hrtimer_reprogram()
687 * Called with interrupts disabled and base->lock held
696 if (skip_equal && expires_next == cpu_base->expires_next) in hrtimer_force_reprogram()
699 __hrtimer_reprogram(cpu_base, cpu_base->next_timer, expires_next); in hrtimer_force_reprogram()
723 * hrtimer_high_res_enabled - query if the highres mode is enabled
741 base->cpu); in hrtimer_switch_to_hres()
744 base->hres_active = 1; in hrtimer_switch_to_hres()
764 * - CONFIG_HIGH_RES_TIMERS is enabled.
765 * - CONFIG_NOHZ_COMMON is enabled
792 raw_spin_lock(&base->lock); in retrigger_next_event()
798 raw_spin_unlock(&base->lock); in retrigger_next_event()
803 * timers, we have to check whether it expires earlier than the timer for
806 * Called with interrupts disabled and base->cpu_base.lock held
811 struct hrtimer_clock_base *base = timer->base; in hrtimer_reprogram()
812 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset); in hrtimer_reprogram()
818 * expiry time which is less than base->offset. Set it to 0. in hrtimer_reprogram()
823 if (timer->is_soft) { in hrtimer_reprogram()
828 * first hard hrtimer on the remote CPU - in hrtimer_reprogram()
831 struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base; in hrtimer_reprogram()
833 if (timer_cpu_base->softirq_activated) in hrtimer_reprogram()
836 if (!ktime_before(expires, timer_cpu_base->softirq_expires_next)) in hrtimer_reprogram()
839 timer_cpu_base->softirq_next_timer = timer; in hrtimer_reprogram()
840 timer_cpu_base->softirq_expires_next = expires; in hrtimer_reprogram()
842 if (!ktime_before(expires, timer_cpu_base->expires_next) || in hrtimer_reprogram()
851 if (base->cpu_base != cpu_base) in hrtimer_reprogram()
854 if (expires >= cpu_base->expires_next) in hrtimer_reprogram()
861 if (cpu_base->in_hrtirq) in hrtimer_reprogram()
864 cpu_base->next_timer = timer; in hrtimer_reprogram()
886 seq = cpu_base->clock_was_set_seq; in update_needs_ipi()
893 if (seq == cpu_base->clock_was_set_seq) in update_needs_ipi()
901 if (cpu_base->in_hrtirq) in update_needs_ipi()
910 active &= cpu_base->active_bases; in update_needs_ipi()
915 next = timerqueue_getnext(&base->active); in update_needs_ipi()
916 expires = ktime_sub(next->expires, base->offset); in update_needs_ipi()
917 if (expires < cpu_base->expires_next) in update_needs_ipi()
921 if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT) in update_needs_ipi()
923 if (cpu_base->softirq_activated) in update_needs_ipi()
925 if (expires < cpu_base->softirq_expires_next) in update_needs_ipi()
936 * vs. CLOCK_MONOTONIC. When high resolution timers are enabled, then this
965 raw_spin_lock_irqsave(&cpu_base->lock, flags); in clock_was_set()
970 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in clock_was_set()
1007 /* Retrigger on the local CPU */ in hrtimers_resume_local()
1017 raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); in unlock_hrtimer_base()
1021 * hrtimer_forward - forward the timer expiry
1047 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED)) in hrtimer_forward()
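hrtimer_forward()/hrtimer_forward_now() are normally called from the timer callback to push the expiry forward by whole intervals before re-arming; the WARN_ON above catches attempts to do this while the timer is still enqueued. A minimal sketch (callback name and the 100 ms period are illustrative, not from the source):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static enum hrtimer_restart my_periodic_cb(struct hrtimer *t)
	{
		/* ... periodic work would go here ... */

		/* Advance the expiry in whole 100 ms periods past "now"; the
		 * return value (ignored here) counts how many periods passed. */
		hrtimer_forward_now(t, ms_to_ktime(100));
		return HRTIMER_RESTART;
	}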
1073 * enqueue_hrtimer - internal function to (re)start a timer
1086 base->cpu_base->active_bases |= 1 << base->index; in enqueue_hrtimer()
1089 WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED); in enqueue_hrtimer()
1091 return timerqueue_add(&base->active, &timer->node); in enqueue_hrtimer()
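enqueue_hrtimer() builds on the timerqueue contract: timerqueue_add() returns true only when the new node became the earliest-expiring entry, which is the caller's cue that the clock event device may need reprogramming. A minimal sketch of that contract (helper name is illustrative):

	#include <linux/timerqueue.h>
	#include <linux/ktime.h>

	static bool add_and_check_first(struct timerqueue_head *head,
					struct timerqueue_node *node, ktime_t expires)
	{
		node->expires = expires;
		/* true => new leftmost node, i.e. the next timer to expire */
		return timerqueue_add(head, node);
	}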
1095 * __remove_hrtimer - internal function to remove a timer
1108 struct hrtimer_cpu_base *cpu_base = base->cpu_base; in __remove_hrtimer()
1109 u8 state = timer->state; in __remove_hrtimer()
1112 WRITE_ONCE(timer->state, newstate); in __remove_hrtimer()
1116 if (!timerqueue_del(&base->active, &timer->node)) in __remove_hrtimer()
1117 cpu_base->active_bases &= ~(1 << base->index); in __remove_hrtimer()
1121 * cpu_base->next_timer. This happens when we remove the first in __remove_hrtimer()
1123 * cpu_base->next_timer. So the worst thing that can happen is in __remove_hrtimer()
1127 if (reprogram && timer == cpu_base->next_timer) in __remove_hrtimer()
1138 u8 state = timer->state; in remove_hrtimer()
1152 reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases); in remove_hrtimer()
1156 * required if the timer is local. If it is local and about in remove_hrtimer()
1177 * granular time values. For relative timers we add hrtimer_resolution in hrtimer_update_lowres()
1180 timer->is_rel = mode & HRTIMER_MODE_REL; in hrtimer_update_lowres()
1181 if (timer->is_rel) in hrtimer_update_lowres()
1200 * hrtimer. cpu_base->softirq_expires_next needs to be updated! in hrtimer_update_softirq_timer()
1206 * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event() in hrtimer_update_softirq_timer()
1207 * cpu_base->*expires_next is only set by hrtimer_reprogram() in hrtimer_update_softirq_timer()
1209 hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram); in hrtimer_update_softirq_timer()
1220 * If the timer is on the local cpu base and is the first expiring in __hrtimer_start_range_ns()
1223 * reprogram on removal, keep the timer local to the current CPU in __hrtimer_start_range_ns()
1227 force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases); in __hrtimer_start_range_ns()
1228 force_local &= base->cpu_base->next_timer == timer; in __hrtimer_start_range_ns()
1236 * skip reprogramming, keep the timer local and enforce in __hrtimer_start_range_ns()
1244 tim = ktime_add_safe(tim, base->get_time()); in __hrtimer_start_range_ns()
1267 hrtimer_force_reprogram(new_base->cpu_base, 1); in __hrtimer_start_range_ns()
1272 * hrtimer_start_range_ns - (re)start an hrtimer
1289 * expiry mode because unmarked timers are moved to softirq expiry. in hrtimer_start_range_ns()
1292 WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft); in hrtimer_start_range_ns()
1294 WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard); in hrtimer_start_range_ns()
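The two WARN_ON_ONCE() lines enforce that the expiry context chosen at init time matches the mode used to start the timer: a timer initialized for softirq expiry must be started with a _SOFT mode (and, on PREEMPT_RT, a hard timer with a _HARD mode). A hedged usage sketch with illustrative names:

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer my_soft_timer;

	static enum hrtimer_restart my_soft_cb(struct hrtimer *t)
	{
		return HRTIMER_NORESTART;
	}

	static void my_soft_timer_setup(void)
	{
		/* Initialized for softirq expiry ... */
		hrtimer_init(&my_soft_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
		my_soft_timer.function = my_soft_cb;

		/* ... so it must also be started in a _SOFT mode, otherwise the
		 * WARN_ON_ONCE() above can trigger. */
		hrtimer_start(&my_soft_timer, ms_to_ktime(50), HRTIMER_MODE_REL_SOFT);
	}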
1306 * hrtimer_try_to_cancel - try to deactivate a timer
1313 * * -1 when the timer is currently executing the callback function and
1320 int ret = -1; in hrtimer_try_to_cancel()
1346 spin_lock_init(&base->softirq_expiry_lock); in hrtimer_cpu_base_init_expiry_lock()
1351 spin_lock(&base->softirq_expiry_lock); in hrtimer_cpu_base_lock_expiry()
1356 spin_unlock(&base->softirq_expiry_lock); in hrtimer_cpu_base_unlock_expiry()
1362 * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
1369 if (atomic_read(&cpu_base->timer_waiters)) { in hrtimer_sync_wait_running()
1370 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_sync_wait_running()
1371 spin_unlock(&cpu_base->softirq_expiry_lock); in hrtimer_sync_wait_running()
1372 spin_lock(&cpu_base->softirq_expiry_lock); in hrtimer_sync_wait_running()
1373 raw_spin_lock_irq(&cpu_base->lock); in hrtimer_sync_wait_running()
1386 * - If the caller is on a remote CPU then it has to spin wait for the timer
1389 * - If the caller originates from the task which preempted the timer
1396 struct hrtimer_clock_base *base = READ_ONCE(timer->base); in hrtimer_cancel_wait_running()
1402 if (!timer->is_soft || is_migration_base(base)) { in hrtimer_cancel_wait_running()
1414 atomic_inc(&base->cpu_base->timer_waiters); in hrtimer_cancel_wait_running()
1415 spin_lock_bh(&base->cpu_base->softirq_expiry_lock); in hrtimer_cancel_wait_running()
1416 atomic_dec(&base->cpu_base->timer_waiters); in hrtimer_cancel_wait_running()
1417 spin_unlock_bh(&base->cpu_base->softirq_expiry_lock); in hrtimer_cancel_wait_running()
1431 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
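hrtimer_try_to_cancel() returns -1 while the callback is executing; hrtimer_cancel() keeps retrying (and on PREEMPT_RT waits via hrtimer_cancel_wait_running()) until the timer is deactivated. An illustrative teardown helper built on that contract:

	#include <linux/hrtimer.h>

	static void my_timer_teardown(struct hrtimer *timer)
	{
		/* Cheap path first; fall back to the blocking cancel if the
		 * callback happens to be running (-1 return). */
		if (hrtimer_try_to_cancel(timer) < 0)
			hrtimer_cancel(timer);	/* waits for the handler to finish */
	}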
1453 * __hrtimer_get_remaining - get remaining time for the timer
1455 * @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y
1475 * hrtimer_get_next_event - get the time until next expiry event
1485 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_get_next_event()
1490 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_get_next_event()
1496 * hrtimer_next_event_without - time until next expiry event w/o one timer
1499 * Returns the next expiry time over all timers except for the @exclude one or
1508 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_next_event_without()
1513 if (!cpu_base->softirq_activated) { in hrtimer_next_event_without()
1514 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT; in hrtimer_next_event_without()
1518 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD; in hrtimer_next_event_without()
1523 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_next_event_without()
1562 * POSIX magic: Relative CLOCK_REALTIME timers are not affected by in __hrtimer_init()
1571 timer->is_soft = softtimer; in __hrtimer_init()
1572 timer->is_hard = !!(mode & HRTIMER_MODE_HARD); in __hrtimer_init()
1573 timer->base = &cpu_base->clock_base[base]; in __hrtimer_init()
1574 timerqueue_init(&timer->node); in __hrtimer_init()
1578 * hrtimer_init - initialize a timer to the given clock
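The "POSIX magic" note above means __hrtimer_init() maps a relative CLOCK_REALTIME timer onto the monotonic base, so clock_settime() cannot move its expiry, while an absolute CLOCK_REALTIME timer stays on the realtime base. A small sketch of the two cases (names are illustrative):

	#include <linux/hrtimer.h>

	static struct hrtimer t;

	static void clockid_mapping_example(void)
	{
		/* Relative: silently backed by the monotonic base. */
		hrtimer_init(&t, CLOCK_REALTIME, HRTIMER_MODE_REL);

		/* Absolute: stays on the realtime base and follows clock_settime(). */
		hrtimer_init(&t, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	}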
1610 base = READ_ONCE(timer->base); in hrtimer_active()
1611 seq = raw_read_seqcount_begin(&base->seq); in hrtimer_active()
1613 if (timer->state != HRTIMER_STATE_INACTIVE || in hrtimer_active()
1614 base->running == timer) in hrtimer_active()
1617 } while (read_seqcount_retry(&base->seq, seq) || in hrtimer_active()
1618 base != READ_ONCE(timer->base)); in hrtimer_active()
1628 * - queued: the timer is queued
1629 * - callback: the timer is being run
1630 * - post: the timer is inactive or (re)queued
1632 * On the read side we ensure we observe timer->state and cpu_base->running
1634 * This includes timer->base changing because sequence numbers alone are
1645 unsigned long flags) __must_hold(&cpu_base->lock) in __run_hrtimer()
1651 lockdep_assert_held(&cpu_base->lock); in __run_hrtimer()
1654 base->running = timer; in __run_hrtimer()
1657 * Separate the ->running assignment from the ->state assignment. in __run_hrtimer()
1660 * hrtimer_active() cannot observe base->running == NULL && in __run_hrtimer()
1661 * timer->state == INACTIVE. in __run_hrtimer()
1663 raw_write_seqcount_barrier(&base->seq); in __run_hrtimer()
1666 fn = timer->function; in __run_hrtimer()
1674 timer->is_rel = false; in __run_hrtimer()
1681 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in __run_hrtimer()
1689 raw_spin_lock_irq(&cpu_base->lock); in __run_hrtimer()
1696 * Note: Because we dropped the cpu_base->lock above, in __run_hrtimer()
1701 !(timer->state & HRTIMER_STATE_ENQUEUED)) in __run_hrtimer()
1705 * Separate the ->running assignment from the ->state assignment. in __run_hrtimer()
1708 * hrtimer_active() cannot observe base->running.timer == NULL && in __run_hrtimer()
1709 * timer->state == INACTIVE. in __run_hrtimer()
1711 raw_write_seqcount_barrier(&base->seq); in __run_hrtimer()
1713 WARN_ON_ONCE(base->running != timer); in __run_hrtimer()
1714 base->running = NULL; in __run_hrtimer()
1721 unsigned int active = cpu_base->active_bases & active_mask; in __hrtimer_run_queues()
1727 basenow = ktime_add(now, base->offset); in __hrtimer_run_queues()
1729 while ((node = timerqueue_getnext(&base->active))) { in __hrtimer_run_queues()
1736 * minimizing wakeups, not running timers at the in __hrtimer_run_queues()
1742 * We don't add extra wakeups by delaying timers that in __hrtimer_run_queues()
1743 * are right-of a not yet expired timer, because that in __hrtimer_run_queues()
1763 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_run_softirq()
1768 cpu_base->softirq_activated = 0; in hrtimer_run_softirq()
1771 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_run_softirq()
1788 BUG_ON(!cpu_base->hres_active); in hrtimer_interrupt()
1789 cpu_base->nr_events++; in hrtimer_interrupt()
1790 dev->next_event = KTIME_MAX; in hrtimer_interrupt()
1792 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_interrupt()
1795 cpu_base->in_hrtirq = 1; in hrtimer_interrupt()
1797 * We set expires_next to KTIME_MAX here with cpu_base->lock in hrtimer_interrupt()
1800 * timers which run their callback and need to be requeued on in hrtimer_interrupt()
1803 cpu_base->expires_next = KTIME_MAX; in hrtimer_interrupt()
1805 if (!ktime_before(now, cpu_base->softirq_expires_next)) { in hrtimer_interrupt()
1806 cpu_base->softirq_expires_next = KTIME_MAX; in hrtimer_interrupt()
1807 cpu_base->softirq_activated = 1; in hrtimer_interrupt()
1819 cpu_base->expires_next = expires_next; in hrtimer_interrupt()
1820 cpu_base->in_hrtirq = 0; in hrtimer_interrupt()
1821 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_interrupt()
1825 cpu_base->hang_detected = 0; in hrtimer_interrupt()
1831 * - tracing in hrtimer_interrupt()
1832 * - long lasting callbacks in hrtimer_interrupt()
1833 * - being scheduled away when running in a VM in hrtimer_interrupt()
1842 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_interrupt()
1844 cpu_base->nr_retries++; in hrtimer_interrupt()
1853 cpu_base->nr_hangs++; in hrtimer_interrupt()
1854 cpu_base->hang_detected = 1; in hrtimer_interrupt()
1855 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_interrupt()
1858 if ((unsigned int)delta > cpu_base->max_hang_time) in hrtimer_interrupt()
1859 cpu_base->max_hang_time = (unsigned int) delta; in hrtimer_interrupt()
1881 if (td && td->evtdev) in __hrtimer_peek_ahead_timers()
1882 hrtimer_interrupt(td->evtdev); in __hrtimer_peek_ahead_timers()
1915 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_run_queues()
1918 if (!ktime_before(now, cpu_base->softirq_expires_next)) { in hrtimer_run_queues()
1919 cpu_base->softirq_expires_next = KTIME_MAX; in hrtimer_run_queues()
1920 cpu_base->softirq_activated = 1; in hrtimer_run_queues()
1925 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_run_queues()
1935 struct task_struct *task = t->task; in hrtimer_wakeup()
1937 t->task = NULL; in hrtimer_wakeup()
1945 * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer
1949 * Wrapper around hrtimer_start_expires() for hrtimer_sleeper based timers
1962 if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard) in hrtimer_sleeper_start_expires()
1965 hrtimer_start_expires(&sl->timer, mode); in hrtimer_sleeper_start_expires()
1982 * spawn many threads which arm timers for the same expiry time on in __hrtimer_init_sleeper()
1986 * OTOH, privileged real-time user space applications rely on the in __hrtimer_init_sleeper()
1988 * a real-time scheduling class, mark the mode for hard interrupt in __hrtimer_init_sleeper()
1996 __hrtimer_init(&sl->timer, clock_id, mode); in __hrtimer_init_sleeper()
1997 sl->timer.function = hrtimer_wakeup; in __hrtimer_init_sleeper()
1998 sl->task = current; in __hrtimer_init_sleeper()
2002 * hrtimer_init_sleeper - initialize sleeper to the given clock
2010 debug_init(&sl->timer, clock_id, mode); in hrtimer_init_sleeper()
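__hrtimer_init_sleeper() wires hrtimer_wakeup() as the callback and records the sleeping task; do_nanosleep() then follows the usual sleeper pattern. A condensed, simplified sketch of that pattern (name and error handling are illustrative):

	#include <linux/hrtimer.h>
	#include <linux/sched.h>
	#include <linux/errno.h>

	static int my_sleep_until(ktime_t abs_expiry)
	{
		struct hrtimer_sleeper t;

		hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		hrtimer_set_expires(&t.timer, abs_expiry);

		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_ABS);
		if (t.task)			/* hrtimer_wakeup() has not fired yet */
			schedule();

		hrtimer_cancel(&t.timer);
		__set_current_state(TASK_RUNNING);
		destroy_hrtimer_on_stack(&t.timer);

		return t.task ? -EINTR : 0;	/* task still set => woken early */
	}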
2018 switch(restart->nanosleep.type) { in nanosleep_copyout()
2021 if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp)) in nanosleep_copyout()
2022 return -EFAULT; in nanosleep_copyout()
2026 if (put_timespec64(ts, restart->nanosleep.rmtp)) in nanosleep_copyout()
2027 return -EFAULT; in nanosleep_copyout()
2032 return -ERESTART_RESTARTBLOCK; in nanosleep_copyout()
2043 if (likely(t->task)) in do_nanosleep()
2046 hrtimer_cancel(&t->timer); in do_nanosleep()
2049 } while (t->task && !signal_pending(current)); in do_nanosleep()
2053 if (!t->task) in do_nanosleep()
2056 restart = &current->restart_block; in do_nanosleep()
2057 if (restart->nanosleep.type != TT_NONE) { in do_nanosleep()
2058 ktime_t rem = hrtimer_expires_remaining(&t->timer); in do_nanosleep()
2067 return -ERESTART_RESTARTBLOCK; in do_nanosleep()
2075 hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid, in hrtimer_nanosleep_restart()
2077 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); in hrtimer_nanosleep_restart()
2091 slack = current->timer_slack_ns; in hrtimer_nanosleep()
2098 if (ret != -ERESTART_RESTARTBLOCK) in hrtimer_nanosleep()
2101 /* Absolute timers do not update the rmtp value and restart: */ in hrtimer_nanosleep()
2103 ret = -ERESTARTNOHAND; in hrtimer_nanosleep()
2107 restart = &current->restart_block; in hrtimer_nanosleep()
2108 restart->nanosleep.clockid = t.timer.base->clockid; in hrtimer_nanosleep()
2109 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); in hrtimer_nanosleep()
2124 return -EFAULT; in SYSCALL_DEFINE2()
2127 return -EINVAL; in SYSCALL_DEFINE2()
2129 current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE; in SYSCALL_DEFINE2()
2130 current->restart_block.nanosleep.rmtp = rmtp; in SYSCALL_DEFINE2()
2145 return -EFAULT; in SYSCALL_DEFINE2()
2148 return -EINVAL; in SYSCALL_DEFINE2()
2150 current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; in SYSCALL_DEFINE2()
2151 current->restart_block.nanosleep.compat_rmtp = rmtp; in SYSCALL_DEFINE2()
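From user space the restart-block machinery is invisible; the "Absolute timers do not update the rmtp value and restart" note above is why an absolute clock_nanosleep() can simply be retried with the same target after a signal. A user-space sketch (assumed glibc environment):

	#include <time.h>
	#include <errno.h>

	/* Sleep until an absolute CLOCK_MONOTONIC target; on EINTR just retry
	 * with the same target, no remaining-time bookkeeping needed. */
	static void sleep_until(const struct timespec *target)
	{
		while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, target, NULL) == EINTR)
			;
	}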
2158 * Functions related to boot-time initialization:
2166 struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i]; in hrtimers_prepare_cpu()
2168 clock_b->cpu_base = cpu_base; in hrtimers_prepare_cpu()
2169 seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock); in hrtimers_prepare_cpu()
2170 timerqueue_init_head(&clock_b->active); in hrtimers_prepare_cpu()
2173 cpu_base->cpu = cpu; in hrtimers_prepare_cpu()
2174 cpu_base->active_bases = 0; in hrtimers_prepare_cpu()
2175 cpu_base->hres_active = 0; in hrtimers_prepare_cpu()
2176 cpu_base->hang_detected = 0; in hrtimers_prepare_cpu()
2177 cpu_base->next_timer = NULL; in hrtimers_prepare_cpu()
2178 cpu_base->softirq_next_timer = NULL; in hrtimers_prepare_cpu()
2179 cpu_base->expires_next = KTIME_MAX; in hrtimers_prepare_cpu()
2180 cpu_base->softirq_expires_next = KTIME_MAX; in hrtimers_prepare_cpu()
2193 while ((node = timerqueue_getnext(&old_base->active))) { in migrate_hrtimer_list()
2204 timer->base = new_base; in migrate_hrtimer_list()
2206 * Enqueue the timers on the new cpu. This does not in migrate_hrtimer_list()
2210 * sort out already expired timers and reprogram the in migrate_hrtimer_list()
2227 * not wakeup ksoftirqd (and acquire the pi-lock) while in hrtimers_dead_cpu()
2238 raw_spin_lock(&new_base->lock); in hrtimers_dead_cpu()
2239 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); in hrtimers_dead_cpu()
2242 migrate_hrtimer_list(&old_base->clock_base[i], in hrtimers_dead_cpu()
2243 &new_base->clock_base[i]); in hrtimers_dead_cpu()
2252 raw_spin_unlock(&old_base->lock); in hrtimers_dead_cpu()
2253 raw_spin_unlock(&new_base->lock); in hrtimers_dead_cpu()
2271 * schedule_hrtimeout_range_clock - sleep until timeout
2297 return -EINTR; in schedule_hrtimeout_range_clock()
2312 return !t.task ? 0 : -EINTR; in schedule_hrtimeout_range_clock()
2317 * schedule_hrtimeout_range - sleep until timeout
2331 * You can set the task state as follows -
2333 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
2337 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2346 * by an explicit wakeup, it returns -EINTR.
2357 * schedule_hrtimeout - sleep until timeout
2365 * You can set the task state as follows -
2367 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
2371 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2380 * by an explicit wakeup, it returns -EINTR.
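A minimal sketch of how callers are expected to use this family, per the rules above: set the task state first, then call the schedule_hrtimeout*() helper; 0 means the timeout elapsed, -EINTR means an early wakeup or signal. The helper name and the one-millisecond slack are assumptions for the example:

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>
	#include <linux/sched.h>

	static int my_wait_ms(unsigned int ms)
	{
		ktime_t timeout = ms_to_ktime(ms);

		set_current_state(TASK_INTERRUPTIBLE);
		return schedule_hrtimeout_range(&timeout, NSEC_PER_MSEC, HRTIMER_MODE_REL);
	}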