/Linux-v5.4/tools/perf/util/

stat-shadow.c
    427  struct evsel *evsel, double avg,  in print_stalled_cycles_frontend() argument
    438  ratio = avg / total * 100.0;  in print_stalled_cycles_frontend()
    451  struct evsel *evsel, double avg,  in print_stalled_cycles_backend() argument
    462  ratio = avg / total * 100.0;  in print_stalled_cycles_backend()
    472  double avg,  in print_branch_misses() argument
    483  ratio = avg / total * 100.0;  in print_branch_misses()
    493  double avg,  in print_l1_dcache_misses() argument
    505  ratio = avg / total * 100.0;  in print_l1_dcache_misses()
    515  double avg,  in print_l1_icache_misses() argument
    527  ratio = avg / total * 100.0;  in print_l1_icache_misses()
    [all …]
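Each of the print_* helpers above derives its shadow metric the same way: the averaged event count is expressed as a percentage of a base event (stalled cycles against total cycles, misses against total accesses). A minimal standalone sketch of that arithmetic; the function and the sample numbers are illustrative, not perf's internal API.

/*
 * Minimal sketch of the percentage computation repeated in the hits
 * above: an averaged event count relative to a base event count.
 */
#include <stdio.h>

static double shadow_ratio(double avg, double total)
{
	return total != 0.0 ? avg / total * 100.0 : 0.0;
}

int main(void)
{
	double stalled = 1.2e9, cycles = 4.8e9;	/* made-up sample counts */

	printf("%.2f%% frontend cycles idle\n", shadow_ratio(stalled, cycles));
	return 0;
}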
stat-display.c
    38   double total, double avg)  in print_noise_pct() argument
    40   double pct = rel_stddev_stats(total, avg);  in print_noise_pct()
    49   struct evsel *evsel, double avg)  in print_noise() argument
    57   print_noise_pct(config, stddev_stats(&ps->res_stats[0]), avg);  in print_noise()
    338  int id, int nr, struct evsel *evsel, double avg)  in abs_printout() argument
    355  fprintf(output, fmt, avg, config->csv_sep);  in abs_printout()
    779  double avg, avg_enabled, avg_running;  member
    789  cd->avg += avg_stats(&ps->res_stats[0]);  in counter_aggr_cb()
    804  struct caggr_data cd = { .avg = 0.0 };  in print_counter_aggr()
    812  uval = cd.avg * counter->scale;  in print_counter_aggr()
    [all …]
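print_noise_pct() reports how noisy a counter was across repeated runs, feeding stddev_stats() and the mean into rel_stddev_stats(). A hedged sketch of that idea, assuming the noise figure is the standard deviation expressed as a percentage of the mean; struct run_stats and its helpers are illustrative, not perf's stat.h API.

/*
 * Welford-style accumulator over repeated measurements; the "noise"
 * reported is stddev as a percentage of the mean (assumed semantics
 * of rel_stddev_stats()).
 */
#include <math.h>
#include <stdio.h>

struct run_stats {
	double mean;		/* running mean */
	double m2;		/* sum of squared deviations */
	unsigned long n;
};

static void run_stats_update(struct run_stats *s, double val)
{
	double delta = val - s->mean;

	s->n++;
	s->mean += delta / s->n;
	s->m2 += delta * (val - s->mean);
}

static double noise_pct(const struct run_stats *s)
{
	double stddev = s->n > 1 ? sqrt(s->m2 / (s->n - 1)) : 0.0;

	return s->mean != 0.0 ? 100.0 * stddev / s->mean : 0.0;
}

int main(void)
{
	struct run_stats s = { 0 };
	double runs[] = { 10123456, 10234567, 9987654 };  /* counts from repeated runs */

	for (unsigned int i = 0; i < sizeof(runs) / sizeof(runs[0]); i++)
		run_stats_update(&s, runs[i]);
	printf("noise: +- %.2f%%\n", noise_pct(&s));
	return 0;
}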
/Linux-v5.4/net/netfilter/

xt_limit.c
    105  || user2credits(r->avg * r->burst) < user2credits(r->avg)) {  in limit_mt_check()
    107  r->avg, r->burst);  in limit_mt_check()
    120  priv->credit = user2credits(r->avg * r->burst); /* Credits full. */  in limit_mt_check()
    123  r->cost = user2credits(r->avg);  in limit_mt_check()
    139  u_int32_t avg;  member
    155  .avg = cm->avg,  in limit_mt_compat_from_user()
    169  .avg = m->avg,  in limit_mt_compat_to_user()
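The checkentry hits above show the xt_limit token bucket being set up: a full bucket holds user2credits(avg * burst) credits, each matching packet costs user2credits(avg), and the user2credits(avg * burst) < user2credits(avg) comparison rejects parameter combinations whose product overflowed. A userspace sketch of the same bucket arithmetic; rate2credits() is a simplified stand-in for the kernel's user2credits().

/*
 * Userspace sketch of the xt_limit-style token bucket seen above.
 * Only the bucket arithmetic mirrors the hits; the credit conversion
 * is a placeholder.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct limit_bucket {
	uint64_t credit;	/* tokens currently available */
	uint64_t credit_cap;	/* full bucket: avg * burst worth of tokens */
	uint64_t cost;		/* tokens consumed per matching packet */
};

static uint64_t rate2credits(uint32_t units)
{
	return (uint64_t)units * 1000;	/* placeholder conversion */
}

/* mirrors the overflow check on r->avg * r->burst in limit_mt_check() */
static bool limit_init(struct limit_bucket *b, uint32_t avg, uint32_t burst)
{
	uint32_t total = avg * burst;	/* may wrap, just like the u32 math */

	if (burst == 0 || rate2credits(total) < rate2credits(avg))
		return false;		/* overflow or bogus parameters */

	b->credit = b->credit_cap = rate2credits(total);	/* credits full */
	b->cost = rate2credits(avg);
	return true;
}

/* refill from elapsed time, then charge one packet if affordable */
static bool limit_allow(struct limit_bucket *b, uint64_t refill)
{
	b->credit += refill;
	if (b->credit > b->credit_cap)
		b->credit = b->credit_cap;
	if (b->credit < b->cost)
		return false;
	b->credit -= b->cost;
	return true;
}

int main(void)
{
	struct limit_bucket b;

	if (!limit_init(&b, 200, 5))	/* illustrative avg/burst values */
		return 1;
	for (int i = 0; i < 7; i++)	/* burst of 5 matches, then over limit */
		printf("packet %d: %s\n", i, limit_allow(&b, 0) ? "match" : "over limit");
	return 0;
}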
xt_hashlimit.c
    144  to->avg = cfg->avg;  in cfg_copy()
    156  to->avg = cfg->avg;  in cfg_copy()
    600  user2rate_bytes((u32)hinfo->cfg.avg);  in rateinfo_init()
    607  dh->rateinfo.rate = user2rate(hinfo->cfg.avg);  in rateinfo_init()
    614  dh->rateinfo.cost = user2credits_byte(hinfo->cfg.avg);  in rateinfo_init()
    617  dh->rateinfo.credit = user2credits(hinfo->cfg.avg *  in rateinfo_init()
    619  dh->rateinfo.cost = user2credits(hinfo->cfg.avg, revision);  in rateinfo_init()
    880  if (cfg->avg == 0 || cfg->avg > U32_MAX) {  in hashlimit_mt_check_common()
    890  if (user2credits_byte(cfg->avg) == 0) {  in hashlimit_mt_check_common()
    892  cfg->avg);  in hashlimit_mt_check_common()
    [all …]
/Linux-v5.4/net/bridge/netfilter/

ebt_limit.c
    75  user2credits(info->avg * info->burst) < user2credits(info->avg)) {  in ebt_limit_mt_check()
    77  info->avg, info->burst);  in ebt_limit_mt_check()
    83  info->credit = user2credits(info->avg * info->burst);  in ebt_limit_mt_check()
    84  info->credit_cap = user2credits(info->avg * info->burst);  in ebt_limit_mt_check()
    85  info->cost = user2credits(info->avg);  in ebt_limit_mt_check()
    96  compat_uint_t avg, burst;  member
/Linux-v5.4/drivers/mfd/

pcf50633-adc.c
    27   int avg;  member
    54   static void adc_setup(struct pcf50633 *pcf, int channel, int avg)  in adc_setup() argument
    63   pcf50633_reg_write(pcf, PCF50633_REG_ADCC1, channel | avg |  in adc_setup()
    77   adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg);  in trigger_next_adc_job_if_any()
    116  int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg)  in pcf50633_adc_sync_read() argument
    123  ret = pcf50633_adc_async_read(pcf, mux, avg,  in pcf50633_adc_sync_read()
    134  int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,  in pcf50633_adc_async_read() argument
    146  req->avg = avg;  in pcf50633_adc_async_read()
/Linux-v5.4/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/

Util.py
    20  def avg(total, n):  function
    40  min, max, avg, count = dict[key]
    45  avg = (avg + value) / 2
    46  dict[key] = (min, max, avg, count + 1)
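Util.py keeps a (min, max, avg, count) tuple per key and updates avg as (avg + value) / 2, which weights recent samples progressively more heavily rather than computing a true mean. A C sketch of the same accumulator carrying both variants side by side; the struct and the sample values are illustrative.

/*
 * Per-key (min, max, avg, count) accumulator. half_avg reproduces the
 * (avg + value) / 2 update from Util.py; mean is a true incremental mean
 * for comparison.
 */
#include <stdio.h>

struct key_stats {
	long min, max;
	double half_avg;	/* (avg + value) / 2 style, as in Util.py */
	double mean;		/* true incremental mean */
	unsigned long count;
};

static void key_stats_add(struct key_stats *s, long value)
{
	if (s->count == 0) {
		s->min = s->max = value;
		s->half_avg = s->mean = value;
	} else {
		if (value < s->min)
			s->min = value;
		if (value > s->max)
			s->max = value;
		s->half_avg = (s->half_avg + value) / 2.0;
		s->mean += (value - s->mean) / (s->count + 1);
	}
	s->count++;
}

int main(void)
{
	struct key_stats s = { 0 };
	long samples[] = { 10, 30, 20, 40 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		key_stats_add(&s, samples[i]);
	printf("min=%ld max=%ld half_avg=%.1f mean=%.1f count=%lu\n",
	       s.min, s.max, s.half_avg, s.mean, s.count);
	return 0;
}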
/Linux-v5.4/drivers/iio/humidity/

hts221_core.c
    180  const struct hts221_avg *avg = &hts221_avg_list[type];  in hts221_update_avg() local
    184  if (avg->avg_avl[i] == val)  in hts221_update_avg()
    190  data = ((i << __ffs(avg->mask)) & avg->mask);  in hts221_update_avg()
    191  err = regmap_update_bits(hw->regmap, avg->addr,  in hts221_update_avg()
    192  avg->mask, data);  in hts221_update_avg()
    221  const struct hts221_avg *avg = &hts221_avg_list[HTS221_SENSOR_H];  in hts221_sysfs_rh_oversampling_avail() local
    225  for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++)  in hts221_sysfs_rh_oversampling_avail()
    227  avg->avg_avl[i]);  in hts221_sysfs_rh_oversampling_avail()
    238  const struct hts221_avg *avg = &hts221_avg_list[HTS221_SENSOR_T];  in hts221_sysfs_temp_oversampling_avail() local
    242  for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++)  in hts221_sysfs_temp_oversampling_avail()
    [all …]
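hts221_update_avg() searches the table of supported oversampling ratios and, on a match, writes the table index into the register field selected by the mask (data = index shifted to the mask's lowest set bit). A userspace sketch of that lookup-and-pack pattern; write_reg_bits() stands in for regmap_update_bits(), and the table contents are illustrative rather than the real HTS221 register map.

/*
 * Find the requested oversampling ratio in a table of supported values,
 * then place the table index into the register field selected by a mask.
 */
#include <stdint.h>
#include <stdio.h>

#define AVG_DEPTH	8

struct avg_cfg {
	uint8_t addr;			/* register address */
	uint8_t mask;			/* bit field holding the selection */
	uint16_t avg_avl[AVG_DEPTH];	/* supported oversampling ratios */
};

static int write_reg_bits(uint8_t addr, uint8_t mask, uint8_t val)
{
	printf("reg 0x%02x: mask 0x%02x <- 0x%02x\n", addr, mask, val);
	return 0;
}

static int update_avg(const struct avg_cfg *cfg, uint16_t val)
{
	for (int i = 0; i < AVG_DEPTH; i++) {
		if (cfg->avg_avl[i] == val) {
			/* index << lowest set bit of mask, like __ffs() use above */
			uint8_t data = (i << __builtin_ctz(cfg->mask)) & cfg->mask;

			return write_reg_bits(cfg->addr, cfg->mask, data);
		}
	}
	return -1;	/* unsupported oversampling ratio */
}

int main(void)
{
	/* illustrative table, not the real HTS221 register layout */
	const struct avg_cfg humidity = {
		.addr = 0x10, .mask = 0x07,
		.avg_avl = { 4, 8, 16, 32, 64, 128, 256, 512 },
	};

	return update_avg(&humidity, 32);
}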
/Linux-v5.4/kernel/sched/

pelt.c
    268  if (___update_load_sum(now, &se->avg, 0, 0, 0)) {  in __update_load_avg_blocked_se()
    269  ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));  in __update_load_avg_blocked_se()
    279  if (___update_load_sum(now, &se->avg, !!se->on_rq, !!se->on_rq,  in __update_load_avg_se()
    282  ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));  in __update_load_avg_se()
    283  cfs_se_util_change(&se->avg);  in __update_load_avg_se()
    293  if (___update_load_sum(now, &cfs_rq->avg,  in __update_load_avg_cfs_rq()
    298  ___update_load_avg(&cfs_rq->avg, 1, 1);  in __update_load_avg_cfs_rq()
psi.c
    281  static void calc_avgs(unsigned long avg[3], int missed_periods,  in calc_avgs()
    288  avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);  in calc_avgs()
    289  avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);  in calc_avgs()
    290  avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);  in calc_avgs()
    296  avg[0] = calc_load(avg[0], EXP_10s, pct);  in calc_avgs()
    297  avg[1] = calc_load(avg[1], EXP_60s, pct);  in calc_avgs()
    298  avg[2] = calc_load(avg[2], EXP_300s, pct);  in calc_avgs()
    403  calc_avgs(group->avg[s], missed_periods, sample, period);  in update_averages()
    956  unsigned long avg[3];  in psi_show() local
    961  avg[w] = group->avg[res * 2 + full][w];  in psi_show()
    [all …]
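calc_avgs() maintains the 10 s, 60 s and 300 s pressure averages with a loadavg-style fixed-point exponential decay. A hedged sketch of one decay step, assuming each EXP_* constant encodes e^(-update_period/window) in 11-bit fixed point and that the update period is a couple of seconds; the constants below are computed at runtime, not copied from psi.c.

/*
 * Loadavg-style fixed-point EWMA: avg = avg*e + sample*(1-e), with e
 * derived from an assumed 2 s averaging period and the 10/60/300 s
 * windows named by EXP_10s/EXP_60s/EXP_300s in the hits above.
 */
#include <math.h>
#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1UL << FSHIFT)

static unsigned long decay_const(double period_s, double window_s)
{
	return (unsigned long)(exp(-period_s / window_s) * FIXED_1);
}

/* one averaging step in fixed point */
static unsigned long calc_load_step(unsigned long avg, unsigned long e,
				    unsigned long sample_fp)
{
	return (avg * e + sample_fp * (FIXED_1 - e)) >> FSHIFT;
}

int main(void)
{
	unsigned long avg[3] = { 0, 0, 0 };
	unsigned long e[3] = {
		decay_const(2.0, 10.0),
		decay_const(2.0, 60.0),
		decay_const(2.0, 300.0),
	};
	/* feed ten periods of 50% stalled time (as a fixed-point fraction) */
	unsigned long pct = 50 * FIXED_1 / 100;

	for (int step = 0; step < 10; step++)
		for (int w = 0; w < 3; w++)
			avg[w] = calc_load_step(avg[w], e[w], pct);

	printf("avg10=%.2f%% avg60=%.2f%% avg300=%.2f%%\n",
	       avg[0] * 100.0 / FIXED_1, avg[1] * 100.0 / FIXED_1,
	       avg[2] * 100.0 / FIXED_1);
	return 0;
}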
pelt.h
    29   static inline void cfs_se_util_change(struct sched_avg *avg)  in cfs_se_util_change() argument
    37   enqueued = avg->util_est.enqueued;  in cfs_se_util_change()
    43   WRITE_ONCE(avg->util_est.enqueued, enqueued);  in cfs_se_util_change()
    100  u32 util_sum = rq->cfs.avg.util_sum;  in update_idle_rq_clock_pelt()
fair.c
    734   struct sched_avg *sa = &se->avg;  in init_entity_runnable_average()
    784   struct sched_avg *sa = &se->avg;  in post_init_entity_util_avg()
    786   long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;  in post_init_entity_util_avg()
    789   if (cfs_rq->avg.util_avg != 0) {  in post_init_entity_util_avg()
    790   sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;  in post_init_entity_util_avg()
    791   sa->util_avg /= (cfs_rq->avg.load_avg + 1);  in post_init_entity_util_avg()
    811   se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);  in post_init_entity_util_avg()
    2025  delta = p->se.avg.load_sum;  in numa_get_avg_runtime()
    2838  cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg;  in enqueue_runnable_load_avg()
    2839  cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum;  in enqueue_runnable_load_avg()
    [all …]
debug.c
    407  P(se->avg.load_avg);  in print_cfs_group_stats()
    408  P(se->avg.util_avg);  in print_cfs_group_stats()
    409  P(se->avg.runnable_load_avg);  in print_cfs_group_stats()
    529  cfs_rq->avg.load_avg);  in print_cfs_rq()
    531  cfs_rq->avg.runnable_load_avg);  in print_cfs_rq()
    533  cfs_rq->avg.util_avg);  in print_cfs_rq()
    535  cfs_rq->avg.util_est.enqueued);  in print_cfs_rq()
    945  P(se.avg.load_sum);  in proc_sched_show_task()
    946  P(se.avg.runnable_load_sum);  in proc_sched_show_task()
    947  P(se.avg.util_sum);  in proc_sched_show_task()
    [all …]
/Linux-v5.4/kernel/trace/

trace_benchmark.c
    45   unsigned int avg;  in trace_do_benchmark() local
    109  avg = delta;  in trace_do_benchmark()
    124  seed = avg;  in trace_do_benchmark()
    140  bm_last, bm_first, bm_max, bm_min, avg, std, stddev);  in trace_do_benchmark()
    143  bm_avg = avg;  in trace_do_benchmark()
ring_buffer_benchmark.c
    238  unsigned long avg;  in ring_buffer_producer() local
    348  avg = NSEC_PER_MSEC / hit;  in ring_buffer_producer()
    349  trace_printk("%ld ns per entry\n", avg);  in ring_buffer_producer()
    366  avg = NSEC_PER_MSEC / (hit + missed);  in ring_buffer_producer()
    367  trace_printk("%ld ns per entry\n", avg);  in ring_buffer_producer()
/Linux-v5.4/include/uapi/linux/netfilter/

xt_hashlimit.h
    34  __u32 avg; /* Average secs between packets * scale */  member
    58  __u32 avg; /* Average secs between packets * scale */  member
    71  __u64 avg; /* Average secs between packets * scale */  member
    85  __u64 avg; /* Average secs between packets * scale */  member
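The comment on every avg field above describes the user/kernel contract: userspace encodes the desired rate as the average number of seconds between packets, pre-multiplied by a scale constant so it fits an integer. A small hedged sketch of that conversion; RATE_SCALE is an illustrative placeholder, not the header's actual XT_HASHLIMIT scale value.

/*
 * Convert a packets-per-second rate into "average seconds between
 * packets * scale", the representation used by the avg fields above.
 * RATE_SCALE is assumed for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define RATE_SCALE	1000000ULL	/* assumed: 1/scale second granularity */

static uint64_t rate_to_avg(uint64_t packets_per_sec)
{
	return packets_per_sec ? RATE_SCALE / packets_per_sec : 0;
}

int main(void)
{
	/* 25 packets/s -> 0.04 s between packets -> 40000 scaled units */
	printf("avg = %llu\n", (unsigned long long)rate_to_avg(25));
	return 0;
}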
/Linux-v5.4/drivers/cpuidle/governors/

menu.c
    183  unsigned int min, max, thresh, avg;  in get_typical_interval() local
    216  avg = sum >> INTERVAL_SHIFT;  in get_typical_interval()
    218  avg = div_u64(sum, divisor);  in get_typical_interval()
    225  int64_t diff = (int64_t)value - avg;  in get_typical_interval()
    247  if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))  in get_typical_interval()
    249  return avg;  in get_typical_interval()
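get_typical_interval() predicts the next idle interval from the mean of recent ones, but only when they look regular: the avg*avg > variance*36 test at line 247 accepts the mean only if the standard deviation is below one sixth of it. A simplified sketch of that test; the real governor also drops outliers and repeats the calculation, which is omitted here.

/*
 * Mean/variance stability test over a window of recent idle intervals:
 * return the mean when stddev < avg/6, otherwise give up.
 */
#include <stdint.h>
#include <stdio.h>

#define INTERVALS	8

static unsigned int typical_interval(const unsigned int intervals[INTERVALS])
{
	uint64_t sum = 0, variance = 0;
	unsigned int avg;

	for (int i = 0; i < INTERVALS; i++)
		sum += intervals[i];
	avg = sum / INTERVALS;

	for (int i = 0; i < INTERVALS; i++) {
		int64_t diff = (int64_t)intervals[i] - avg;

		variance += (uint64_t)(diff * diff);
	}
	variance /= INTERVALS;

	if ((uint64_t)avg * avg > variance * 36)
		return avg;		/* intervals look regular: predict avg */
	return UINT32_MAX;		/* too noisy: no prediction */
}

int main(void)
{
	unsigned int regular[INTERVALS] = { 500, 510, 490, 505, 495, 500, 498, 502 };
	unsigned int noisy[INTERVALS]   = { 50, 900, 10, 4000, 120, 7, 2500, 300 };

	printf("regular: %u\n", typical_interval(regular));
	printf("noisy:   %u\n", typical_interval(noisy));
	return 0;
}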
/Linux-v5.4/net/dccp/ccids/lib/

tfrc.h
    49  static inline u32 tfrc_ewma(const u32 avg, const u32 newval, const u8 weight)  in tfrc_ewma() argument
    51  return avg ? (weight * avg + (10 - weight) * newval) / 10 : newval;  in tfrc_ewma()
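tfrc_ewma() is short enough that the hit shows the whole formula: the weight is given in tenths (9 keeps 90% of the old average), and an average of zero means "no history yet" and is seeded with the first sample. A standalone copy with a small usage loop; the RTT sample values are made up.

/*
 * EWMA with the weight expressed in tenths, as in the tfrc.h hit above.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t tfrc_ewma(uint32_t avg, uint32_t newval, uint8_t weight)
{
	return avg ? (weight * avg + (10 - weight) * newval) / 10 : newval;
}

int main(void)
{
	uint32_t rtt_avg = 0;
	uint32_t samples[] = { 100, 120, 80, 110 };	/* e.g. RTTs in ms */

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rtt_avg = tfrc_ewma(rtt_avg, samples[i], 9);
		printf("sample %u -> avg %u\n", samples[i], rtt_avg);
	}
	return 0;
}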
/Linux-v5.4/tools/perf/bench/

epoll-ctl.c
    282  unsigned long avg[EPOLL_NR_OPS];  in print_summary() local
    286  avg[i] = avg_stats(&all_stats[i]);  in print_summary()
    291  avg[OP_EPOLL_ADD], rel_stddev_stats(stddev[OP_EPOLL_ADD],  in print_summary()
    292  avg[OP_EPOLL_ADD]));  in print_summary()
    294  avg[OP_EPOLL_MOD], rel_stddev_stats(stddev[OP_EPOLL_MOD],  in print_summary()
    295  avg[OP_EPOLL_MOD]));  in print_summary()
    297  avg[OP_EPOLL_DEL], rel_stddev_stats(stddev[OP_EPOLL_DEL],  in print_summary()
    298  avg[OP_EPOLL_DEL]));  in print_summary()
/Linux-v5.4/kernel/time/

test_udelay.c
    32  uint64_t avg;  in udelay_test_single() local
    56  avg = sum;  in udelay_test_single()
    57  do_div(avg, iters);  in udelay_test_single()
    60  (usecs * 1000) - allowed_error_ns, min, avg, max);  in udelay_test_single()
/Linux-v5.4/drivers/power/supply/

ab8500_fg.c
    107  int avg;  member
    383  struct ab8500_fg_avg_cap *avg = &di->avg_cap;  in ab8500_fg_add_cap_sample() local
    386  avg->sum += sample - avg->samples[avg->pos];  in ab8500_fg_add_cap_sample()
    387  avg->samples[avg->pos] = sample;  in ab8500_fg_add_cap_sample()
    388  avg->time_stamps[avg->pos] = now;  in ab8500_fg_add_cap_sample()
    389  avg->pos++;  in ab8500_fg_add_cap_sample()
    391  if (avg->pos == NBR_AVG_SAMPLES)  in ab8500_fg_add_cap_sample()
    392  avg->pos = 0;  in ab8500_fg_add_cap_sample()
    394  if (avg->nbr_samples < NBR_AVG_SAMPLES)  in ab8500_fg_add_cap_sample()
    395  avg->nbr_samples++;  in ab8500_fg_add_cap_sample()
    [all …]
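ab8500_fg_add_cap_sample() keeps a fixed-size ring of capacity samples and updates the running sum in O(1) by subtracting the slot being overwritten before storing the new sample. A sketch of that circular moving average; NBR_AVG_SAMPLES matches the name in the hits, but its value and the sample data here are illustrative.

/*
 * Circular-buffer moving average: subtract the evicted slot, add the
 * new sample, wrap the position, and grow the divisor until the ring
 * is full.
 */
#include <stdio.h>

#define NBR_AVG_SAMPLES	20

struct avg_cap {
	int samples[NBR_AVG_SAMPLES];
	int sum;
	int pos;
	int nbr_samples;
	int avg;
};

static int add_cap_sample(struct avg_cap *avg, int sample)
{
	avg->sum += sample - avg->samples[avg->pos];
	avg->samples[avg->pos] = sample;
	avg->pos++;

	if (avg->pos == NBR_AVG_SAMPLES)
		avg->pos = 0;			/* wrap around the ring */

	if (avg->nbr_samples < NBR_AVG_SAMPLES)
		avg->nbr_samples++;

	avg->avg = avg->sum / avg->nbr_samples;
	return avg->avg;
}

int main(void)
{
	struct avg_cap cap = { { 0 } };

	for (int mah = 1000; mah >= 960; mah -= 10)	/* fake capacity samples */
		printf("avg capacity: %d mAh\n", add_cap_sample(&cap, mah));
	return 0;
}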
/Linux-v5.4/drivers/clk/bcm/

clk-bcm53573-ilp.c
    50  int avg;  in bcm53573_ilp_recalc_rate() local
    87  avg = sum / num;  in bcm53573_ilp_recalc_rate()
    89  return parent_rate * 4 / avg;  in bcm53573_ilp_recalc_rate()
/Linux-v5.4/include/linux/mfd/pcf50633/

adc.h
    63  pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
    67  pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg);
/Linux-v5.4/net/rxrpc/

peer_event.c
    308  u64 sum = peer->rtt_sum, avg;  in rxrpc_peer_add_rtt() local
    332  avg = sum / RXRPC_RTT_CACHE_SIZE;  in rxrpc_peer_add_rtt()
    334  avg = sum;  in rxrpc_peer_add_rtt()
    335  do_div(avg, usage);  in rxrpc_peer_add_rtt()
    339  peer->rtt = avg;  in rxrpc_peer_add_rtt()
    341  usage, avg);  in rxrpc_peer_add_rtt()
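rxrpc_peer_add_rtt() smooths RTT over a small cache of recent samples: once the cache is full the average is sum / RXRPC_RTT_CACHE_SIZE, and while it is still filling the partial sum is divided by the number of valid entries (via do_div(), since this is 64-bit math). A userspace sketch of that pattern; RTT_CACHE_SIZE and the sample values are illustrative.

/*
 * Keep the last N RTT samples, maintain their running sum, and report
 * sum/N once the cache is full (or sum/usage while it is filling).
 */
#include <stdint.h>
#include <stdio.h>

#define RTT_CACHE_SIZE	8

struct rtt_state {
	uint64_t cache[RTT_CACHE_SIZE];	/* recent RTTs, e.g. in nanoseconds */
	uint64_t sum;
	unsigned int cursor;		/* next slot to overwrite */
	unsigned int usage;		/* how many slots are valid */
};

static uint64_t rtt_add(struct rtt_state *s, uint64_t rtt)
{
	uint64_t avg;

	s->sum += rtt - s->cache[s->cursor];	/* drop the evicted sample */
	s->cache[s->cursor] = rtt;
	s->cursor = (s->cursor + 1) % RTT_CACHE_SIZE;
	if (s->usage < RTT_CACHE_SIZE)
		s->usage++;

	if (s->usage == RTT_CACHE_SIZE)
		avg = s->sum / RTT_CACHE_SIZE;
	else
		avg = s->sum / s->usage;	/* the kernel uses do_div() here */

	return avg;
}

int main(void)
{
	struct rtt_state s = { { 0 } };
	uint64_t samples[] = { 210000, 190000, 250000, 200000 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("rtt=%llu avg=%llu\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)rtt_add(&s, samples[i]));
	return 0;
}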
/Linux-v5.4/tools/perf/scripts/python/

futex-contention.py
    49  min, max, avg, count = lock_waits[tid, lock]
    51  (process_names[tid], tid, lock, count, avg))