/Linux-v5.15/include/trace/events/

iocost.h
    16   TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
    19   TP_ARGS(iocg, path, now, last_period, cur_period, vtime),
    24   __field(u64, now)
    39   __entry->now = now->now;
    40   __entry->vnow = now->vnow;
    41   __entry->vrate = now->vrate;
    51   TP_printk("[%s:%s] now=%llu:%llu vrate=%llu "
    55   __entry->now, __entry->vnow, __entry->vrate,
    63   TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
    66   TP_ARGS(iocg, path, now, last_period, cur_period, vtime)
    [all …]

alarmtimer.h
    47   TP_PROTO(struct alarm *alarm, ktime_t now),
    49   TP_ARGS(alarm, now),
    55   __field(s64, now)
    62   __entry->now = now;
    65   TP_printk("alarmtimer:%p type:%s expires:%llu now:%llu",
    69   __entry->now
    75   TP_PROTO(struct alarm *alarm, ktime_t now),
    77   TP_ARGS(alarm, now)
    82   TP_PROTO(struct alarm *alarm, ktime_t now),
    84   TP_ARGS(alarm, now)
    [all …]

timer.h
    64   __field( unsigned long, now )
    72   __entry->now = jiffies;
    78   (long)__entry->expires - __entry->now,
    98   __field( unsigned long, now )
    105  __entry->now = jiffies;
    110  TP_printk("timer=%p function=%ps now=%lu baseclk=%lu",
    111  __entry->timer, __entry->function, __entry->now,
    227  * @now: pointer to variable which contains current time of the
    234  TP_PROTO(struct hrtimer *hrtimer, ktime_t *now),
    236  TP_ARGS(hrtimer, now),
    [all …]
/Linux-v5.15/net/rxrpc/

call_event.c
    31   unsigned long now = jiffies;  in rxrpc_propose_ping() local
    32   unsigned long ping_at = now + rxrpc_idle_ack_delay;  in rxrpc_propose_ping()
    36   rxrpc_reduce_call_timer(call, ping_at, now,  in rxrpc_propose_ping()
    112  unsigned long now = jiffies, ack_at;  in __rxrpc_propose_ACK() local
    120  ack_at += now;  in __rxrpc_propose_ACK()
    123  rxrpc_reduce_call_timer(call, ack_at, now,  in __rxrpc_propose_ACK()
    162  ktime_t now, max_age, oldest, ack_ts;  in rxrpc_resend() local
    170  now = ktime_get_real();  in rxrpc_resend()
    171  max_age = ktime_sub(now, jiffies_to_usecs(rto_j));  in rxrpc_resend()
    186  oldest = now;  in rxrpc_resend()
    [all …]
/Linux-v5.15/tools/testing/selftests/timens/

exec.c
    23   struct timespec now, tst;  in main() local
    28   if (sscanf(argv[1], "%ld", &now.tv_sec) != 1)  in main()
    33   if (abs(tst.tv_sec - now.tv_sec) > 5)  in main()
    34   return pr_fail("%ld %ld\n", now.tv_sec, tst.tv_sec);  in main()
    43   clock_gettime(CLOCK_MONOTONIC, &now);  in main()
    53   if (abs(tst.tv_sec - now.tv_sec) > 5)  in main()
    55   now.tv_sec, tst.tv_sec);  in main()
    73   if (abs(tst.tv_sec - now.tv_sec - OFFSET) > 5)  in main()
    75   now.tv_sec + OFFSET, tst.tv_sec);  in main()
    79   snprintf(now_str, sizeof(now_str), "%ld", now.tv_sec + OFFSET);  in main()

clock_nanosleep.c
    26   struct timespec *now, *rem;  member
    36   clock_nanosleep(args->clockid, args->abs ? TIMER_ABSTIME : 0, args->now, args->rem);  in call_nanosleep()
    43   struct timespec now = {}, rem;  in run_test() local
    44   struct thread_args args = { .now = &now, .rem = &rem, .clockid = clockid};  in run_test()
    64   now.tv_sec = start.tv_sec;  in run_test()
    65   now.tv_nsec = start.tv_nsec;  in run_test()
    68   now.tv_sec += 3600;  in run_test()
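The selftests above share one pattern: capture a reference timestamp in "now",
let the operation under test run, then require the re-read clock to land within
a small tolerance. A minimal standalone sketch of that pattern (illustrative
only, not a file from the tree; the 5-second tolerance is borrowed from the
snippets above):

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void)
    {
        struct timespec now, tst;

        if (clock_gettime(CLOCK_MONOTONIC, &now))
            return 1;

        /* ... the real tests exec a child or switch time namespaces here ... */

        if (clock_gettime(CLOCK_MONOTONIC, &tst))
            return 1;

        /* fail if the observed clock drifted by more than 5 seconds */
        if (labs(tst.tv_sec - now.tv_sec) > 5) {
            fprintf(stderr, "%ld %ld\n", now.tv_sec, tst.tv_sec);
            return 1;
        }
        return 0;
    }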
/Linux-v5.15/block/

blk-iocost.c
    558   u64 now;  member
    944   static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)  in ioc_refresh_vrate() argument
    946   s64 pleft = ioc->period_at + ioc->period_us - now->now;  in ioc_refresh_vrate()
    1023  static void ioc_now(struct ioc *ioc, struct ioc_now *now)  in ioc_now() argument
    1027  now->now_ns = ktime_get();  in ioc_now()
    1028  now->now = ktime_to_us(now->now_ns);  in ioc_now()
    1029  now->vrate = atomic64_read(&ioc->vtime_rate);  in ioc_now()
    1041  now->vnow = ioc->period_at_vtime +  in ioc_now()
    1042  (now->now - ioc->period_at) * now->vrate;  in ioc_now()
    1046  static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)  in ioc_start_period() argument
    [all …]
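The ioc_now() lines above show how blk-iocost derives the current virtual
time: take the vtime recorded at the start of the period and extrapolate by
the elapsed wall-clock microseconds times the current rate. A hedged sketch of
just that arithmetic (names and types are illustrative, not the blk-iocost
API):

    #include <stdint.h>

    /* vnow = vtime at period start + (elapsed us since period start) * vrate */
    static uint64_t project_vnow(uint64_t period_at_vtime, uint64_t period_at_us,
                                 uint64_t now_us, uint64_t vrate)
    {
        return period_at_vtime + (now_us - period_at_us) * vrate;
    }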
/Linux-v5.15/kernel/time/

tick-sched.c
    57   static void tick_do_update_jiffies64(ktime_t now)  in tick_do_update_jiffies64() argument
    72   if (ktime_before(now, smp_load_acquire(&tick_next_period)))  in tick_do_update_jiffies64()
    86   if (ktime_before(now, nextp))  in tick_do_update_jiffies64()
    96   if (ktime_before(now, tick_next_period)) {  in tick_do_update_jiffies64()
    103  delta = ktime_sub(now, tick_next_period);  in tick_do_update_jiffies64()
    172  static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)  in tick_sched_do_timer() argument
    197  tick_do_update_jiffies64(now);  in tick_sched_do_timer()
    611  static void tick_nohz_update_jiffies(ktime_t now)  in tick_nohz_update_jiffies() argument
    615  __this_cpu_write(tick_cpu_sched.idle_waketime, now);  in tick_nohz_update_jiffies()
    618  tick_do_update_jiffies64(now);  in tick_nohz_update_jiffies()
    [all …]

timer_list.c
    23   u64 now;  member
    47   int idx, u64 now)  in print_timer() argument
    55   (long long)(ktime_to_ns(hrtimer_get_softexpires(timer)) - now),  in print_timer()
    56   (long long)(ktime_to_ns(hrtimer_get_expires(timer)) - now));  in print_timer()
    61   u64 now)  in print_active_timers() argument
    91   print_timer(m, timer, &tmp, i, now);  in print_active_timers()
    99   print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)  in print_base() argument
    112  print_active_timers(m, base, now + ktime_to_ns(base->offset));  in print_base()
    115  static void print_cpu(struct seq_file *m, int cpu, u64 now)  in print_cpu() argument
    123  print_base(m, cpu_base->clock_base + i, now);  in print_cpu()
    [all …]
/Linux-v5.15/tools/power/cpupower/bench/

benchmark.c
    35   long long now, then;  in calculate_timespace() local
    44   now = get_time();  in calculate_timespace()
    48   timed = (unsigned int)(then - now);  in calculate_timespace()
    55   now = get_time();  in calculate_timespace()
    59   timed = (unsigned int)(then - now);  in calculate_timespace()
    81   long long now, then;  in start_benchmark() local
    126  now = get_time();  in start_benchmark()
    130  performance_time += then - now - sleep_time;  in start_benchmark()
    135  (long)(then - now), sleep_time,  in start_benchmark()
    152  now = get_time();  in start_benchmark()
    [all …]
/Linux-v5.15/arch/x86/events/

msr.c
    222  u64 now;  in msr_read_counter() local
    225  rdmsrl(event->hw.event_base, now);  in msr_read_counter()
    227  now = rdtsc_ordered();  in msr_read_counter()
    229  return now;  in msr_read_counter()
    234  u64 prev, now;  in msr_event_update() local
    240  now = msr_read_counter(event);  in msr_event_update()
    242  if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)  in msr_event_update()
    245  delta = now - prev;  in msr_event_update()
    251  now = now & (1ULL << 31) ? (now >> 16) & 0x3f : -1;  in msr_event_update()
    252  local64_set(&event->count, now);  in msr_event_update()
    [all …]
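msr_event_update() above follows the usual free-running-counter pattern: read
the counter, publish the new reading as prev_count with a compare-exchange
(retrying if another updater raced), then accumulate the difference. A hedged
userspace sketch of that pattern, using C11 atomics in place of the kernel's
local64 helpers (names are illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t prev_count;
    static _Atomic uint64_t event_count;

    static void event_update(uint64_t now)
    {
        uint64_t prev = atomic_load(&prev_count);

        /* publish "now" as the new baseline; on a race, prev is refreshed
         * with the current value and the exchange is retried */
        while (!atomic_compare_exchange_weak(&prev_count, &prev, now))
            ;

        atomic_fetch_add(&event_count, now - prev);
    }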
/Linux-v5.15/drivers/block/drbd/

drbd_debugfs.c
    96   static void seq_print_one_request(struct seq_file *m, struct drbd_request *req, unsigned long now)  in seq_print_one_request() argument
    108  seq_printf(m, "\t%d", jiffies_to_msecs(now - req->start_jif));  in seq_print_one_request()
    109  seq_print_age_or_dash(m, s & RQ_IN_ACT_LOG, now - req->in_actlog_jif);  in seq_print_one_request()
    110  seq_print_age_or_dash(m, s & RQ_LOCAL_PENDING, now - req->pre_submit_jif);  in seq_print_one_request()
    113  seq_print_age_or_dash(m, s & RQ_NET_SENT, now - req->pre_send_jif);  in seq_print_one_request()
    114  seq_print_age_or_dash(m, (s & RQ_NET_SENT) && !(s & RQ_NET_PENDING), now - req->acked_jif);  in seq_print_one_request()
    115  seq_print_age_or_dash(m, s & RQ_NET_DONE, now - req->net_done_jif);  in seq_print_one_request()
    122  static void seq_print_minor_vnr_req(struct seq_file *m, struct drbd_request *req, unsigned long now)  in seq_print_minor_vnr_req() argument
    125  seq_print_one_request(m, req, now);  in seq_print_minor_vnr_req()
    128  …int_resource_pending_meta_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)  in seq_print_resource_pending_meta_io() argument
    [all …]
/Linux-v5.15/include/net/

codel_impl.h
    108  codel_time_t now)  in codel_should_drop() argument
    119  vars->ldelay = now - skb_time_func(skb);  in codel_should_drop()
    135  vars->first_above_time = now + params->interval;  in codel_should_drop()
    136  } else if (codel_time_after(now, vars->first_above_time)) {  in codel_should_drop()
    153  codel_time_t now;  in codel_dequeue() local
    160  now = codel_get_time();  in codel_dequeue()
    162  skb_len_func, skb_time_func, backlog, now);  in codel_dequeue()
    167  } else if (codel_time_after_eq(now, vars->drop_next)) {  in codel_dequeue()
    173  * that the next drop should happen now,  in codel_dequeue()
    177  codel_time_after_eq(now, vars->drop_next)) {  in codel_dequeue()
    [all …]
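The codel_should_drop() lines above compute the packet's sojourn time
(ldelay = now minus the enqueue timestamp) and only arm dropping once the
delay has stayed above the target for a full interval. A simplified sketch of
that decision, assuming 32-bit wrapping timestamps as CoDel uses; this
illustrates the idea rather than the codel_impl.h API (the real code also
checks the queue backlog):

    #include <stdbool.h>
    #include <stdint.h>

    struct codel_vars {
        uint32_t first_above_time;   /* 0 means "not above target yet" */
    };

    static bool sojourn_above_target(struct codel_vars *vars, uint32_t now,
                                     uint32_t enqueue_time, uint32_t target,
                                     uint32_t interval)
    {
        uint32_t ldelay = now - enqueue_time;   /* time spent in the queue */

        if (ldelay < target) {
            vars->first_above_time = 0;         /* back below target: reset */
            return false;
        }
        if (vars->first_above_time == 0) {
            /* first time above target: start the grace interval */
            vars->first_above_time = now + interval;
            return false;
        }
        /* still above target: become a drop candidate once the interval passes */
        return (int32_t)(now - vars->first_above_time) >= 0;
    }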
/Linux-v5.15/kernel/sched/

psi.c
    254  u64 now, state_start;  in get_recent_times() local
    264  now = cpu_clock(cpu);  in get_recent_times()
    283  times[s] += now - state_start;  in get_recent_times()
    369  static u64 update_averages(struct psi_group *group, u64 now)  in update_averages() argument
    378  if (now - expires >= psi_period)  in update_averages()
    379  missed_periods = div_u64(now - expires, psi_period);  in update_averages()
    389  period = now - (group->avg_last_update + (missed_periods * psi_period));  in update_averages()
    390  group->avg_last_update = now;  in update_averages()
    428  u64 now;  in psi_avgs_work() local
    435  now = sched_clock();  in psi_avgs_work()
    [all …]
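update_averages() above shows the bookkeeping PSI does when its worker runs
late: count how many whole averaging periods were missed, then measure how
much of the current period has elapsed. A hedged sketch of that arithmetic,
with plain 64-bit division standing in for div_u64() and illustrative field
names; it assumes, as the kernel code does, that the update runs at or after
the expiry time:

    #include <stdint.h>

    struct avg_state {
        uint64_t avg_last_update;   /* when averages were last folded, ns */
        uint64_t avg_next_update;   /* when the next fold was due, ns */
    };

    static uint64_t account_period(struct avg_state *st, uint64_t now,
                                   uint64_t period_ns)
    {
        uint64_t missed_periods = 0;
        uint64_t elapsed;

        if (now - st->avg_next_update >= period_ns)
            missed_periods = (now - st->avg_next_update) / period_ns;

        /* portion of the current period covered by this update */
        elapsed = now - (st->avg_last_update + missed_periods * period_ns);
        st->avg_last_update = now;

        return elapsed;
    }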
pelt.h
    4    int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
    5    int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
    6    int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
    7    int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
    8    int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
    11   int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
    19   update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)  in update_thermal_load_avg() argument
    160  update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)  in update_cfs_rq_load_avg() argument
    166  update_rt_rq_load_avg(u64 now, struct rq *rq, int running)  in update_rt_rq_load_avg() argument
    172  update_dl_rq_load_avg(u64 now, struct rq *rq, int running)  in update_dl_rq_load_avg() argument
    [all …]

pelt.c
    93   * ... |---x---|------| ... |------|-----x (now)
    163  * (now) (~1ms ago) (~2ms ago)
    184  ___update_load_sum(u64 now, struct sched_avg *sa,  in ___update_load_sum() argument
    189  delta = now - sa->last_update_time;  in ___update_load_sum()
    195  sa->last_update_time = now;  in ___update_load_sum()
    224  * Now we know we crossed measurement unit boundaries. The *_avg  in ___update_load_sum()
    299  int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)  in __update_load_avg_blocked_se() argument
    301  if (___update_load_sum(now, &se->avg, 0, 0, 0)) {  in __update_load_avg_blocked_se()
    310  int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)  in __update_load_avg_se()
    312  if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se),  in __update_load_avg_se()
    [all …]
/Linux-v5.15/drivers/md/bcache/

util.c
    165  uint64_t now, duration, last;  in bch_time_stats_update() local
    169  now = local_clock();  in bch_time_stats_update()
    170  duration = time_after64(now, start_time)  in bch_time_stats_update()
    171  ? now - start_time : 0;  in bch_time_stats_update()
    172  last = time_after64(now, stats->last)  in bch_time_stats_update()
    173  ? now - stats->last : 0;  in bch_time_stats_update()
    188  stats->last = now ?: 1;  in bch_time_stats_update()
    203  uint64_t now = local_clock();  in bch_next_delay() local
    213  if (time_before64(now + NSEC_PER_SEC * 5LLU / 2LLU, d->next))  in bch_next_delay()
    214  d->next = now + NSEC_PER_SEC * 5LLU / 2LLU;  in bch_next_delay()
    [all …]
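bch_time_stats_update() above guards its subtraction with time_after64() so
that a start time which appears to lie in the future yields a duration of
zero rather than a huge unsigned value. A minimal sketch of that guard;
time_after64() is written out here the way the kernel defines it, via a
signed difference, and the wrapper name is illustrative:

    #include <stdint.h>

    /* true if a is after b, tolerant of 64-bit wraparound */
    static inline int time_after64_sketch(uint64_t a, uint64_t b)
    {
        return (int64_t)(b - a) < 0;
    }

    static uint64_t elapsed_or_zero(uint64_t now, uint64_t start_time)
    {
        return time_after64_sketch(now, start_time) ? now - start_time : 0;
    }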
/Linux-v5.15/arch/x86/kernel/

pvclock.c
    119  struct timespec64 now;  in pvclock_read_wallclock() local
    132  now.tv_sec = wall_clock->sec;  in pvclock_read_wallclock()
    133  now.tv_nsec = wall_clock->nsec;  in pvclock_read_wallclock()
    138  delta += now.tv_sec * NSEC_PER_SEC + now.tv_nsec;  in pvclock_read_wallclock()
    140  now.tv_nsec = do_div(delta, NSEC_PER_SEC);  in pvclock_read_wallclock()
    141  now.tv_sec = delta;  in pvclock_read_wallclock()
    143  set_normalized_timespec64(ts, now.tv_sec, now.tv_nsec);  in pvclock_read_wallclock()
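pvclock_read_wallclock() above folds a nanosecond delta into the hypervisor's
wallclock base and then splits the total back into seconds and nanoseconds;
do_div() performs the division and hands back the remainder as the nanosecond
part. A userspace analogue using ordinary division (function and parameter
names are illustrative):

    #include <stdint.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000ULL

    static void wallclock_plus_delta(const struct timespec *base,
                                     uint64_t delta_ns, struct timespec *out)
    {
        uint64_t total = delta_ns +
                         (uint64_t)base->tv_sec * NSEC_PER_SEC + base->tv_nsec;

        out->tv_nsec = total % NSEC_PER_SEC;   /* remainder, as do_div() returns */
        out->tv_sec  = total / NSEC_PER_SEC;   /* quotient */
    }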
/Linux-v5.15/tools/testing/selftests/timers/

nanosleep.c
    103  struct timespec now, target, rel;  in nanosleep_test() local
    106  if (clock_gettime(clockid, &now))  in nanosleep_test()
    108  target = timespec_add(now, ns);  in nanosleep_test()
    112  clock_gettime(clockid, &now);  in nanosleep_test()
    114  if (!in_order(target, now))  in nanosleep_test()
    118  clock_gettime(clockid, &now);  in nanosleep_test()
    122  target = timespec_add(now, ns);  in nanosleep_test()
    124  clock_gettime(clockid, &now);  in nanosleep_test()
    126  if (!in_order(target, now))  in nanosleep_test()
/Linux-v5.15/kernel/trace/

trace_clock.c
    98   u64 now, prev_time;  in trace_clock_global() local
    116  now = sched_clock_cpu(this_cpu);  in trace_clock_global()
    118  /* Make sure that now is always greater than or equal to prev_time */  in trace_clock_global()
    119  if ((s64)(now - prev_time) < 0)  in trace_clock_global()
    120  now = prev_time;  in trace_clock_global()
    133  if ((s64)(now - prev_time) < 0)  in trace_clock_global()
    134  now = prev_time;  in trace_clock_global()
    136  trace_clock_struct.prev_time = now;  in trace_clock_global()
    144  return now;  in trace_clock_global()
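trace_clock_global() above keeps its timestamps monotonic by clamping: if the
per-CPU clock reads earlier than the last globally recorded time, prev_time is
returned instead, and prev_time is then advanced. A stripped-down sketch of
that clamp (locking omitted; the kernel protects prev_time with a spinlock):

    #include <stdint.h>

    static uint64_t prev_time;   /* last timestamp handed out */

    static uint64_t monotonic_now(uint64_t raw_now)
    {
        /* signed difference makes the comparison wrap-safe */
        if ((int64_t)(raw_now - prev_time) < 0)
            raw_now = prev_time;

        prev_time = raw_now;
        return raw_now;
    }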
/Linux-v5.15/fs/affs/

Changes
    103  - The partition checker now also ignores the
    114  It also marks the inode dirty now (which is not
    207  they appear now as normal Unix links. They are
    208  now resolved only once in lookup(). The backside
    237  - Owner/Group defaults now to the fs user (i.e.
    242  name buffer. It is now silently truncated to
    253  - fsuser is now checked last.
    262  - Extension block caches are now allocated on
    272  - Hash chains are now sorted by block numbers.
    280  - Errors and warnings are now reported via a
    [all …]
/Linux-v5.15/net/sched/

sch_etf.c
    80   ktime_t now;  in is_packet_valid() local
    101  now = q->get_time();  in is_packet_valid()
    102  if (ktime_before(txtime, now) || ktime_before(txtime, q->last))  in is_packet_valid()
    194  /* Now we may need to re-arm the qdisc watchdog for the next packet. */  in etf_enqueue_timesortedlist()
    201  ktime_t now)  in timesortedlist_drop() argument
    208  if (ktime_after(skb->tstamp, now))  in timesortedlist_drop()
    213  /* The rbnode field in the skb re-uses these fields, now that  in timesortedlist_drop()
    237  /* The rbnode field in the skb re-uses these fields, now that  in timesortedlist_remove()
    257  ktime_t now, next;  in etf_dequeue_timesortedlist() local
    263  now = q->get_time();  in etf_dequeue_timesortedlist()
    [all …]
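is_packet_valid() above rejects a packet whose requested transmit time has
already passed, or that would slot in behind the last packet the qdisc
released. A hedged sketch of that check, with plain signed nanosecond
timestamps standing in for ktime_t and an illustrative function name:

    #include <stdbool.h>
    #include <stdint.h>

    static bool txtime_is_valid(int64_t txtime_ns, int64_t now_ns,
                                int64_t last_ns)
    {
        if (txtime_ns < now_ns)    /* deadline already in the past */
            return false;
        if (txtime_ns < last_ns)   /* would reorder behind earlier traffic */
            return false;
        return true;
    }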
/Linux-v5.15/arch/s390/kernel/

idle.c
    92   unsigned long now, idle_time, idle_enter, idle_exit, in_idle;  in show_idle_time() local
    103  now = get_tod_clock();  in show_idle_time()
    107  } else if (now > idle_enter) {  in show_idle_time()
    108  in_idle = now - idle_enter;  in show_idle_time()
    119  unsigned long now, idle_enter, idle_exit, in_idle;  in arch_cpu_idle_time() local
    128  now = get_tod_clock();  in arch_cpu_idle_time()
    132  } else if (now > idle_enter) {  in arch_cpu_idle_time()
    133  in_idle = now - idle_enter;  in arch_cpu_idle_time()
/Linux-v5.15/drivers/rtc/

interface.c
    215  struct rtc_time before, now;  in __rtc_read_alarm() local
    268  memcpy(&before, &now, sizeof(struct rtc_time));  in __rtc_read_alarm()
    283  err = rtc_read_time(rtc, &now);  in __rtc_read_alarm()
    288  } while (before.tm_min != now.tm_min ||  in __rtc_read_alarm()
    289  before.tm_hour != now.tm_hour ||  in __rtc_read_alarm()
    290  before.tm_mon != now.tm_mon ||  in __rtc_read_alarm()
    291  before.tm_year != now.tm_year);  in __rtc_read_alarm()
    297  alarm->time.tm_sec = now.tm_sec;  in __rtc_read_alarm()
    299  alarm->time.tm_min = now.tm_min;  in __rtc_read_alarm()
    301  alarm->time.tm_hour = now.tm_hour;  in __rtc_read_alarm()
    [all …]
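__rtc_read_alarm() above brackets its alarm read with two time reads and
retries if the minute, hour, month, or year rolled over in between, so the
alarm fields can be completed from a time snapshot that is known to be
consistent with them. A hedged sketch of that bracketing pattern; the
callbacks and struct here are placeholders, not the rtc_class_ops API:

    struct stamp { int min, hour, mday, mon, year; };

    static int read_alarm_consistent(int (*read_time)(struct stamp *),
                                     int (*read_alarm)(struct stamp *),
                                     struct stamp *alarm)
    {
        struct stamp before, now;
        int err = read_time(&now);

        if (err)
            return err;

        do {
            before = now;   /* snapshot taken before reading the alarm */

            err = read_alarm(alarm);
            if (!err)
                err = read_time(&now);
            if (err)
                return err;
            /* if the clock ticked across a field boundary, try again */
        } while (before.min != now.min || before.hour != now.hour ||
                 before.mon != now.mon || before.year != now.year);

        return 0;
    }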
/Linux-v5.15/Documentation/filesystems/

porting.rst
    43   Keep in mind that now you need explicit initialization of private data
    58   informative error value to report). Call it foo_fill_super(). Now declare::
    87   Now we have the exclusion between ->lookup() and directory removal (by
    98   and ->readdir() are called without BKL now. Grab it on entry, drop upon return
    100  parts do not need BKL - better yet, now you can shift lock_kernel() and
    150  ->setattr() is called without BKL now. Caller _always_ holds ->i_mutex, so
    152  Callers of notify_change() need ->i_mutex now.
    173  s_export_op is now required for exporting a filesystem.
    273  ->permission() is called without BKL now. Grab it on entry, drop upon
    275  your method or its parts do not need BKL - better yet, now you can
    [all …]