Lines Matching +full:up +full:- +full:samples
1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/posix-timers.h>
20 #include "posix-timers.h"
28 pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC; in posix_cputimers_group_init()
29 pct->timers_active = true; in posix_cputimers_group_init()
35 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
39 * Returns 0 on success, -ESRCH on failure. Can fail if the task is exiting and we cannot lock_task_sighand. Cannot fail if task is current.
48 return -ESRCH; in update_rlimit_cpu()
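Side note: update_rlimit_cpu() above is what services setrlimit(2)/prlimit(2) for RLIMIT_CPU, so a lowered limit bites an already-running process by refreshing the expiry cache. A minimal userspace illustration (the limit values are arbitrary):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	/* 5 s soft / 10 s hard CPU-time limit for the current process;
	 * on success the kernel re-evaluates the nextevt expiry cache. */
	struct rlimit rl = { .rlim_cur = 5, .rlim_max = 10 };

	if (setrlimit(RLIMIT_CPU, &rl) != 0)
		perror("setrlimit");
	return 0;
}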
102 ret = pid_for_clock(clock, false) ? 0 : -EINVAL; in validate_clock_permissions()
115 return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock)); in cpu_timer_task_rcu()
124 u64 delta, incr, expires = timer->it.cpu.node.expires; in bump_cpu_timer()
127 if (!timer->it_interval) in bump_cpu_timer()
133 incr = timer->it_interval; in bump_cpu_timer()
134 delta = now + incr - expires; in bump_cpu_timer()
137 for (i = 0; incr < delta - incr; i++) in bump_cpu_timer()
140 for (; i >= 0; incr >>= 1, i--) { in bump_cpu_timer()
144 timer->it.cpu.node.expires += incr; in bump_cpu_timer()
145 timer->it_overrun += 1LL << i; in bump_cpu_timer()
146 delta -= incr; in bump_cpu_timer()
148 return timer->it.cpu.node.expires; in bump_cpu_timer()
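The pair of loops above advances the expiry past now in whole it_interval steps without a 64-bit division: the increment is doubled until it covers more than half of the distance, then walked back down, taking each power-of-two multiple that still fits and accumulating 2^i overruns per step. A standalone sketch of the same arithmetic (names are hypothetical):

#include <assert.h>
#include <stdint.h>

static void bump(uint64_t now, uint64_t incr, uint64_t *expires,
		 uint64_t *overrun)
{
	uint64_t delta, step = incr;
	int i;

	if (now < *expires)
		return;

	delta = now + step - *expires;
	/* Double the step until it covers more than half of delta. */
	for (i = 0; step < delta - step; i++)
		step <<= 1;
	/* Walk back down, taking every power-of-two multiple that fits. */
	for (; i >= 0; step >>= 1, i--) {
		if (delta < step)
			continue;
		*expires += step;
		*overrun += 1ULL << i;
		delta -= step;
	}
}

int main(void)
{
	uint64_t expires = 100, overrun = 0;

	bump(1000, 30, &expires, &overrun);
	/* 31 intervals of 30 push 100 just past 1000: 100 + 31*30 = 1030. */
	assert(expires == 1030 && overrun == 31);
	return 0;
}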
154 return !(~pct->bases[CPUCLOCK_PROF].nextevt | in expiry_cache_is_inactive()
155 ~pct->bases[CPUCLOCK_VIRT].nextevt | in expiry_cache_is_inactive()
156 ~pct->bases[CPUCLOCK_SCHED].nextevt); in expiry_cache_is_inactive()
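The one-liner above checks all three bases at once. It relies on inactive nextevt slots holding U64_MAX (the value posix_cputimers_init() seeds them with): ~x is zero only for x == U64_MAX, so OR-ing the three complements yields zero exactly when every base is idle. A standalone check of the trick:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool all_inactive(uint64_t prof, uint64_t virt, uint64_t sched)
{
	/* Zero iff every slot still holds the "no timer queued" sentinel. */
	return !(~prof | ~virt | ~sched);
}

int main(void)
{
	assert(all_inactive(UINT64_MAX, UINT64_MAX, UINT64_MAX));
	assert(!all_inactive(UINT64_MAX, 42, UINT64_MAX));
	return 0;
}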
165 tp->tv_sec = 0; in posix_cpu_clock_getres()
166 tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ); in posix_cpu_clock_getres()
173 tp->tv_nsec = 1; in posix_cpu_clock_getres()
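The (NSEC_PER_SEC + HZ - 1) / HZ expression above is a ceiling division: the reported resolution is rounded up to a full scheduler tick. The tv_nsec = 1 store is the CPUCLOCK_SCHED branch, which advertises nanosecond resolution since sched_clock() is much finer than a tick. A quick demonstration of why the round-up matters:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	long hz[] = { 100, 250, 300, 1000 };

	/* HZ=300 prints 3333334, not the truncated 3333333. */
	for (int i = 0; i < 4; i++)
		printf("HZ=%ld -> %ld ns/tick\n",
		       hz[i], (NSEC_PER_SEC + hz[i] - 1) / hz[i]);
	return 0;
}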
188 return error ? : -EPERM; in posix_cpu_clock_set()
192 * Sample a per-thread clock for the given task. clkid is validated.
214 static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime) in store_samples() argument
216 samples[CPUCLOCK_PROF] = stime + utime; in store_samples()
217 samples[CPUCLOCK_VIRT] = utime; in store_samples()
218 samples[CPUCLOCK_SCHED] = rtime; in store_samples()
221 static void task_sample_cputime(struct task_struct *p, u64 *samples) in task_sample_cputime() argument
226 store_samples(samples, stime, utime, p->se.sum_exec_runtime); in task_sample_cputime()
230 u64 *samples) in proc_sample_cputime_atomic() argument
234 utime = atomic64_read(&at->utime); in proc_sample_cputime_atomic()
235 stime = atomic64_read(&at->stime); in proc_sample_cputime_atomic()
236 rtime = atomic64_read(&at->sum_exec_runtime); in proc_sample_cputime_atomic()
237 store_samples(samples, stime, utime, rtime); in proc_sample_cputime_atomic()
258 __update_gt_cputime(&cputime_atomic->utime, sum->utime); in update_gt_cputime()
259 __update_gt_cputime(&cputime_atomic->stime, sum->stime); in update_gt_cputime()
260 __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime); in update_gt_cputime()
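update_gt_cputime() above defers to __update_gt_cputime(), which only ever moves a counter forward; in current kernels that is an atomic64 compare-exchange retry loop, so a stale sample from a racing updater can never rewind group time. A rough userspace equivalent using C11 atomics in place of the kernel's atomic64 primitives:

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

static void update_gt(_Atomic uint64_t *cputime, uint64_t sum)
{
	uint64_t cur = atomic_load(cputime);

	/* Retry until *cputime holds at least sum; never move backwards. */
	while (cur < sum &&
	       !atomic_compare_exchange_weak(cputime, &cur, sum))
		;
}

int main(void)
{
	_Atomic uint64_t t = 100;

	update_gt(&t, 250);
	assert(atomic_load(&t) == 250);
	update_gt(&t, 200);	/* stale sample: no effect */
	assert(atomic_load(&t) == 250);
	return 0;
}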
264 * thread_group_sample_cputime - Sample cputime for a given task
266 * @samples: Storage for time samples
274 void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples) in thread_group_sample_cputime() argument
276 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; in thread_group_sample_cputime()
277 struct posix_cputimers *pct = &tsk->signal->posix_cputimers; in thread_group_sample_cputime()
279 WARN_ON_ONCE(!pct->timers_active); in thread_group_sample_cputime()
281 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples); in thread_group_sample_cputime()
285 * thread_group_start_cputime - Start cputime and return a sample
287 * @samples: Storage for time samples
296 static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples) in thread_group_start_cputime() argument
298 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; in thread_group_start_cputime()
299 struct posix_cputimers *pct = &tsk->signal->posix_cputimers; in thread_group_start_cputime()
304 if (!READ_ONCE(pct->timers_active)) { in thread_group_start_cputime()
313 update_gt_cputime(&cputimer->cputime_atomic, &sum); in thread_group_start_cputime()
322 WRITE_ONCE(pct->timers_active, true); in thread_group_start_cputime()
324 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples); in thread_group_start_cputime()
327 static void __thread_group_cputime(struct task_struct *tsk, u64 *samples) in __thread_group_cputime() argument
332 store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime); in __thread_group_cputime()
343 struct thread_group_cputimer *cputimer = &p->signal->cputimer; in cpu_clock_sample_group()
344 struct posix_cputimers *pct = &p->signal->posix_cputimers; in cpu_clock_sample_group()
345 u64 samples[CPUCLOCK_MAX]; in cpu_clock_sample_group() local
347 if (!READ_ONCE(pct->timers_active)) { in cpu_clock_sample_group()
349 thread_group_start_cputime(p, samples); in cpu_clock_sample_group()
351 __thread_group_cputime(p, samples); in cpu_clock_sample_group()
353 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples); in cpu_clock_sample_group()
356 return samples[clkid]; in cpu_clock_sample_group()
369 return -EINVAL; in posix_cpu_clock_get()
383 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
385 * new timer already all-zeros initialized.
393 pid = pid_for_clock(new_timer->it_clock, false); in posix_cpu_timer_create()
396 return -EINVAL; in posix_cpu_timer_create()
408 lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key); in posix_cpu_timer_create()
410 new_timer->kclock = &clock_posix_cpu; in posix_cpu_timer_create()
411 timerqueue_init(&new_timer->it.cpu.node); in posix_cpu_timer_create()
412 new_timer->it.cpu.pid = get_pid(pid); in posix_cpu_timer_create()
420 int clkidx = CPUCLOCK_WHICH(timer->it_clock); in timer_base()
422 if (CPUCLOCK_PERTHREAD(timer->it_clock)) in timer_base()
423 return tsk->posix_cputimers.bases + clkidx; in timer_base()
425 return tsk->signal->posix_cputimers.bases + clkidx; in timer_base()
430 * This will also re-evaluate the need to keep around the process wide cputime counter and tick dependency and eventually shut these down.
439 base->nextevt = 0; in trigger_base_recalc_expires()
454 struct cpu_timer *ctmr = &timer->it.cpu; in disarm_timer()
461 if (cpu_timer_getexpires(ctmr) == base->nextevt) in disarm_timer()
467 * Clean up a CPU-clock timer that is about to be destroyed.
474 struct cpu_timer *ctmr = &timer->it.cpu; in posix_cpu_timer_del()
495 WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node)); in posix_cpu_timer_del()
497 if (timer->it.cpu.firing) in posix_cpu_timer_del()
508 put_pid(ctmr->pid); in posix_cpu_timer_del()
521 ctmr->head = NULL; in cleanup_timerqueue()
534 cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead); in cleanup_timers()
535 cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead); in cleanup_timers()
536 cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead); in cleanup_timers()
546 cleanup_timers(&tsk->posix_cputimers); in posix_cpu_timers_exit()
550 cleanup_timers(&tsk->signal->posix_cputimers); in posix_cpu_timers_exit_group()
560 struct cpu_timer *ctmr = &timer->it.cpu; in arm_timer()
563 if (!cpu_timer_enqueue(&base->tqhead, ctmr)) in arm_timer()
567 * We are the new earliest-expiring POSIX 1.b timer, hence need to update expiration cache. in arm_timer()
572 if (newexp < base->nextevt) in arm_timer()
573 base->nextevt = newexp; in arm_timer()
575 if (CPUCLOCK_PERTHREAD(timer->it_clock)) in arm_timer()
586 struct cpu_timer *ctmr = &timer->it.cpu; in cpu_timer_fire()
588 if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) { in cpu_timer_fire()
593 } else if (unlikely(timer->sigq == NULL)) { in cpu_timer_fire()
598 wake_up_process(timer->it_process); in cpu_timer_fire()
600 } else if (!timer->it_interval) { in cpu_timer_fire()
602 * One-shot timer. Clear it as soon as it's fired. in cpu_timer_fire()
606 } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) { in cpu_timer_fire()
614 ++timer->it_requeue_pending; in cpu_timer_fire()
627 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock); in posix_cpu_timer_set()
629 struct cpu_timer *ctmr = &timer->it.cpu; in posix_cpu_timer_set()
643 return -ESRCH; in posix_cpu_timer_set()
650 new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value)); in posix_cpu_timer_set()
653 * Protect against sighand release/switch in exit/exec and p->cpu_timers in posix_cpu_timer_set()
654 * and p->signal->cpu_timers read/write in arm_timer() in posix_cpu_timer_set()
663 return -ESRCH; in posix_cpu_timer_set()
669 old_incr = timer->it_interval; in posix_cpu_timer_set()
672 if (unlikely(timer->it.cpu.firing)) { in posix_cpu_timer_set()
673 timer->it.cpu.firing = -1; in posix_cpu_timer_set()
687 if (CPUCLOCK_PERTHREAD(timer->it_clock)) in posix_cpu_timer_set()
694 old->it_value.tv_sec = 0; in posix_cpu_timer_set()
695 old->it_value.tv_nsec = 0; in posix_cpu_timer_set()
707 old_expires = exp - val; in posix_cpu_timer_set()
708 old->it_value = ns_to_timespec64(old_expires); in posix_cpu_timer_set()
710 old->it_value.tv_nsec = 1; in posix_cpu_timer_set()
711 old->it_value.tv_sec = 0; in posix_cpu_timer_set()
744 * set up the signal and overrun bookkeeping. in posix_cpu_timer_set()
746 timer->it_interval = timespec64_to_ktime(new->it_interval); in posix_cpu_timer_set()
753 timer->it_requeue_pending = (timer->it_requeue_pending + 2) & in posix_cpu_timer_set()
755 timer->it_overrun_last = 0; in posix_cpu_timer_set()
756 timer->it_overrun = -1; in posix_cpu_timer_set()
784 old->it_interval = ns_to_timespec64(old_incr); in posix_cpu_timer_set()
791 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock); in posix_cpu_timer_get()
792 struct cpu_timer *ctmr = &timer->it.cpu; in posix_cpu_timer_get()
804 itp->it_interval = ktime_to_timespec64(timer->it_interval); in posix_cpu_timer_get()
812 if (CPUCLOCK_PERTHREAD(timer->it_clock)) in posix_cpu_timer_get()
818 itp->it_value = ns_to_timespec64(expires - now); in posix_cpu_timer_get()
824 itp->it_value.tv_nsec = 1; in posix_cpu_timer_get()
825 itp->it_value.tv_sec = 0; in posix_cpu_timer_get()
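The tv_sec = 0 / tv_nsec = 1 stores above encode a readback convention: a timer whose expiry has passed but whose signal has not been processed yet is reported as having one nanosecond left, never zero, so callers cannot mistake it for a disarmed timer. A small model of that convention (the helper name is made up):

#include <assert.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

struct ts { int64_t sec; long nsec; };

static struct ts remaining(uint64_t expires, uint64_t now)
{
	struct ts v = { 0, 1 };	/* expired but uncollected: "almost firing" */

	if (expires > now) {
		uint64_t d = expires - now;

		v.sec = d / NSEC_PER_SEC;
		v.nsec = d % NSEC_PER_SEC;
	}
	return v;
}

int main(void)
{
	assert(remaining(5, 10).sec == 0 && remaining(5, 10).nsec == 1);
	assert(remaining(5 * NSEC_PER_SEC / 2, NSEC_PER_SEC / 2).sec == 2);
	return 0;
}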
849 ctmr->firing = 1; in collect_timerqueue()
851 list_add_tail(&ctmr->elist, firing); in collect_timerqueue()
857 static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples, in collect_posix_cputimers() argument
860 struct posix_cputimer_base *base = pct->bases; in collect_posix_cputimers()
864 base->nextevt = collect_timerqueue(&base->tqhead, firing, in collect_posix_cputimers()
865 samples[i]); in collect_posix_cputimers()
871 if (tsk->dl.dl_overrun) { in check_dl_overrun()
872 tsk->dl.dl_overrun = 0; in check_dl_overrun()
885 current->comm, task_pid_nr(current)); in check_rlimit()
892 * Check for any per-thread CPU timers that have fired and move them off
893 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
894 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
899 struct posix_cputimers *pct = &tsk->posix_cputimers; in check_thread_timers()
900 u64 samples[CPUCLOCK_MAX]; in check_thread_timers() local
909 task_sample_cputime(tsk, samples); in check_thread_timers()
910 collect_posix_cputimers(pct, samples, firing); in check_thread_timers()
918 unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ); in check_thread_timers()
929 tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft; in check_thread_timers()
939 struct posix_cputimers *pct = &sig->posix_cputimers; in stop_process_timers()
942 WRITE_ONCE(pct->timers_active, false); in stop_process_timers()
949 if (!it->expires) in check_cpu_itimer()
952 if (cur_time >= it->expires) { in check_cpu_itimer()
953 if (it->incr) in check_cpu_itimer()
954 it->expires += it->incr; in check_cpu_itimer()
956 it->expires = 0; in check_cpu_itimer()
964 if (it->expires && it->expires < *expires) in check_cpu_itimer()
965 *expires = it->expires; in check_cpu_itimer()
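check_cpu_itimer() above carries the classic setitimer semantics: on expiry a periodic timer re-arms by its increment, a one-shot timer clears, and whatever expiry remains is folded into the caller's earliest-event cache. A compact userspace model with signal delivery elided:

#include <assert.h>
#include <stdint.h>

struct itimer { uint64_t expires, incr; };

/* Returns nonzero when the signal would be raised. */
static int check_itimer(struct itimer *it, uint64_t cur, uint64_t *earliest)
{
	int fired = 0;

	if (!it->expires)
		return 0;
	if (cur >= it->expires) {
		fired = 1;
		if (it->incr)
			it->expires += it->incr;	/* periodic: re-arm */
		else
			it->expires = 0;		/* one-shot: disarm */
	}
	if (it->expires && it->expires < *earliest)
		*earliest = it->expires;
	return fired;
}

int main(void)
{
	struct itimer it = { .expires = 100, .incr = 50 };
	uint64_t earliest = UINT64_MAX;

	assert(check_itimer(&it, 120, &earliest));
	assert(it.expires == 150 && earliest == 150);
	return 0;
}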
969 * Check for any per-thread CPU timers that have fired and move them
970 * off the tsk->*_timers list onto the firing list. Per-thread timers
976 struct signal_struct *const sig = tsk->signal; in check_process_timers()
977 struct posix_cputimers *pct = &sig->posix_cputimers; in check_process_timers()
978 u64 samples[CPUCLOCK_MAX]; in check_process_timers() local
986 if (!READ_ONCE(pct->timers_active) || pct->expiry_active) in check_process_timers()
993 pct->expiry_active = true; in check_process_timers()
999 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples); in check_process_timers()
1000 collect_posix_cputimers(pct, samples, firing); in check_process_timers()
1005 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], in check_process_timers()
1006 &pct->bases[CPUCLOCK_PROF].nextevt, in check_process_timers()
1007 samples[CPUCLOCK_PROF], SIGPROF); in check_process_timers()
1008 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], in check_process_timers()
1009 &pct->bases[CPUCLOCK_VIRT].nextevt, in check_process_timers()
1010 samples[CPUCLOCK_VIRT], SIGVTALRM); in check_process_timers()
1014 /* RLIMIT_CPU is in seconds. Samples are nanoseconds */ in check_process_timers()
1016 u64 ptime = samples[CPUCLOCK_PROF]; in check_process_timers()
1027 sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1; in check_process_timers()
1032 if (softns < pct->bases[CPUCLOCK_PROF].nextevt) in check_process_timers()
1033 pct->bases[CPUCLOCK_PROF].nextevt = softns; in check_process_timers()
1039 pct->expiry_active = false; in check_process_timers()
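The RLIMIT_CPU handling above converts the limit from seconds to nanoseconds before comparing it with the profiling sample, and after raising SIGXCPU it bumps the soft limit by one second so the warning repeats until the hard limit kicks in. A rough model of that soft-limit creep (0 stands in for RLIM_INFINITY, and the hard-limit SIGKILL path is omitted):

#include <assert.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Returns nonzero when SIGXCPU would be raised; may advance *soft_sec. */
static int check_rlimit_cpu(uint64_t ptime_ns, uint64_t *soft_sec,
			    uint64_t hard_sec)
{
	uint64_t softns = *soft_sec * NSEC_PER_SEC;

	if (!*soft_sec || ptime_ns < softns)
		return 0;
	if (*soft_sec < hard_sec)
		*soft_sec += 1;		/* fire again one second later */
	return 1;
}

int main(void)
{
	uint64_t soft = 2, hard = 10;

	assert(!check_rlimit_cpu(1 * NSEC_PER_SEC, &soft, hard));
	assert(check_rlimit_cpu(2 * NSEC_PER_SEC, &soft, hard) && soft == 3);
	return 0;
}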
1048 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock); in posix_cpu_timer_rearm()
1067 if (CPUCLOCK_PERTHREAD(timer->it_clock)) in posix_cpu_timer_rearm()
1075 * Now re-arm for the new expiry time. in posix_cpu_timer_rearm()
1084 * task_cputimers_expired - Check whether posix CPU timers are expired
1086 * @samples: Array of current samples for the CPUCLOCK clocks
1089 * Returns true if any member of @samples is greater than the corresponding
1090 * member of @pct->bases[CLK].nextevt. False otherwise
1093 task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct) in task_cputimers_expired() argument
1098 if (samples[i] >= pct->bases[i].nextevt) in task_cputimers_expired()
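task_cputimers_expired() above is the entire fast-path test: one compare per clock against the cached earliest expiry, where an inactive slot holds U64_MAX and therefore can never be reached by a real sample. A standalone model:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define CPUCLOCK_MAX 3

static bool cputimers_expired(const uint64_t *samples, const uint64_t *nextevt)
{
	for (int i = 0; i < CPUCLOCK_MAX; i++)
		if (samples[i] >= nextevt[i])
			return true;
	return false;
}

int main(void)
{
	uint64_t next[CPUCLOCK_MAX] = { 1000, UINT64_MAX, UINT64_MAX };
	uint64_t ok[CPUCLOCK_MAX]  = {  999,   50,   70 };
	uint64_t hit[CPUCLOCK_MAX] = { 1000,   50,   70 };

	assert(!cputimers_expired(ok, next));
	assert(cputimers_expired(hit, next));
	return 0;
}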
1105 * fastpath_timer_check - POSIX CPU timers fast path.
1116 struct posix_cputimers *pct = &tsk->posix_cputimers; in fastpath_timer_check()
1120 u64 samples[CPUCLOCK_MAX]; in fastpath_timer_check() local
1122 task_sample_cputime(tsk, samples); in fastpath_timer_check()
1123 if (task_cputimers_expired(samples, pct)) in fastpath_timer_check()
1127 sig = tsk->signal; in fastpath_timer_check()
1128 pct = &sig->posix_cputimers; in fastpath_timer_check()
1144 if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) { in fastpath_timer_check()
1145 u64 samples[CPUCLOCK_MAX]; in fastpath_timer_check() local
1147 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, in fastpath_timer_check()
1148 samples); in fastpath_timer_check()
1150 if (task_cputimers_expired(samples, pct)) in fastpath_timer_check()
1154 if (dl_task(tsk) && tsk->dl.dl_overrun) in fastpath_timer_check()
1177 memset(&p->posix_cputimers_work.work, 0, in clear_posix_cputimers_work()
1178 sizeof(p->posix_cputimers_work.work)); in clear_posix_cputimers_work()
1179 init_task_work(&p->posix_cputimers_work.work, in clear_posix_cputimers_work()
1181 p->posix_cputimers_work.scheduled = false; in clear_posix_cputimers_work()
1194 * Note: All operations on tsk->posix_cputimer_work.scheduled happen either
1201 return tsk->posix_cputimers_work.scheduled; in posix_cpu_timers_work_scheduled()
1206 if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled)) in __run_posix_cpu_timers()
1210 tsk->posix_cputimers_work.scheduled = true; in __run_posix_cpu_timers()
1211 task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME); in __run_posix_cpu_timers()
1225 tsk->posix_cputimers_work.scheduled = false; in posix_cpu_timers_enable_work()
1246 tsk->posix_cputimers_work.scheduled = false; in posix_cpu_timers_enable_work()
1290 * Here we take off tsk->signal->cpu_timers[N] and in handle_posix_cpu_timers()
1291 * tsk->cpu_timers[N] all the timers that are firing, and in handle_posix_cpu_timers()
1314 * - On !RT kernels no tick can have happened on this CPU in handle_posix_cpu_timers()
1319 * - On RT kernels ticks might have happened but the tick in handle_posix_cpu_timers()
1335 * that gets the timer lock before we do will give it up and spin until we've taken care of that timer below. in handle_posix_cpu_timers()
1356 spin_lock(&timer->it_lock); in handle_posix_cpu_timers()
1357 list_del_init(&timer->it.cpu.elist); in handle_posix_cpu_timers()
1358 cpu_firing = timer->it.cpu.firing; in handle_posix_cpu_timers()
1359 timer->it.cpu.firing = 0; in handle_posix_cpu_timers()
1361 * The firing flag is -1 if we collided with a reset of the timer, which already reported this in handle_posix_cpu_timers()
1363 * almost-firing as an overrun. So don't generate an event. in handle_posix_cpu_timers()
1367 spin_unlock(&timer->it_lock); in handle_posix_cpu_timers()
1400 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
1401 * The tsk->sighand->siglock must be held by the caller.
1411 nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt; in set_process_cpu_timer()
1425 *oldval -= now; in set_process_cpu_timer()
1452 * Set up a temporary timer and then wait for it to go off. in do_cpu_nanosleep()
1457 timer.it_overrun = -1; in do_cpu_nanosleep()
1526 error = -ERESTART_RESTARTBLOCK; in do_cpu_nanosleep()
1530 restart = &current->restart_block; in do_cpu_nanosleep()
1531 restart->nanosleep.expires = expires; in do_cpu_nanosleep()
1532 if (restart->nanosleep.type != TT_NONE) in do_cpu_nanosleep()
1544 struct restart_block *restart_block = &current->restart_block; in posix_cpu_nsleep()
1553 return -EINVAL; in posix_cpu_nsleep()
1557 if (error == -ERESTART_RESTARTBLOCK) { in posix_cpu_nsleep()
1560 return -ERESTARTNOHAND; in posix_cpu_nsleep()
1562 restart_block->nanosleep.clockid = which_clock; in posix_cpu_nsleep()
1570 clockid_t which_clock = restart_block->nanosleep.clockid; in posix_cpu_nsleep_restart()
1573 t = ns_to_timespec64(restart_block->nanosleep.expires); in posix_cpu_nsleep_restart()
1593 timer->it_clock = PROCESS_CLOCK; in process_cpu_timer_create()
1613 timer->it_clock = THREAD_CLOCK; in thread_cpu_timer_create()