Searched refs:SCHED_CAPACITY_SCALE (Results 1 – 9 of 9) sorted by relevance
24    DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
38    DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
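Results 1 and 2 are the per-CPU ratio defaults: both freq_scale and cpu_scale start at full scale until the architecture reports real values. As a minimal sketch, assuming the usual shift of 10 (so the scale is 1024) and hypothetical frequencies, the freq_scale ratio is simply current frequency over maximum in Q10 fixed point; freq_scale_of() and the kHz values below are illustrative, not kernel code:

    #include <stdio.h>

    /* Assumed to match include/linux/sched.h: shift of 10, scale of 1024. */
    #define SCHED_CAPACITY_SHIFT 10
    #define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)

    /* Current frequency as a fraction of the maximum, in 1/1024ths. */
    static unsigned long freq_scale_of(unsigned long cur_khz, unsigned long max_khz)
    {
            return (cur_khz << SCHED_CAPACITY_SHIFT) / max_khz;
    }

    int main(void)
    {
            /* 1.2 GHz out of a 2.0 GHz max: 614/1024, i.e. ~60% speed. */
            printf("freq_scale = %lu\n", freq_scale_of(1200000, 2000000));
            return 0;
    }

The sketches after the later results reuse these two #defines.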
224 return SCHED_CAPACITY_SCALE; in arch_scale_cpu_capacity()
87    if (group->sgc->capacity != SCHED_CAPACITY_SCALE)  in sched_domain_debug_one()
921   sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);  in init_overlap_sched_group()
922   sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;  in init_overlap_sched_group()
923   sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;  in init_overlap_sched_group()
1087  sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));  in get_group()
1088  sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;  in get_group()
1089  sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;  in get_group()
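Result 4's two init paths seed a scheduling group with one full scale per spanned CPU before update_group_capacity() folds in measured per-CPU capacities. A sketch of that seeding arithmetic, with a hypothetical stand-in struct:

    /* Illustrative stand-in for struct sched_group_capacity: a group
     * spanning 4 CPUs is seeded with 4 * 1024 = 4096. */
    struct sgc_sketch {
            unsigned long capacity, min_capacity, max_capacity;
    };

    static void seed_group(struct sgc_sketch *sgc, unsigned int weight)
    {
            sgc->capacity     = SCHED_CAPACITY_SCALE * weight;
            sgc->min_capacity = SCHED_CAPACITY_SCALE;
            sgc->max_capacity = SCHED_CAPACITY_SCALE;
    }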
16    #define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)
368   min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);  in sugov_iowait_boost()
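Result 5 is schedutil's I/O-wait boost: it starts no lower than one eighth of full scale and doubles on each consecutive I/O wakeup, saturating at SCHED_CAPACITY_SCALE. A sketch of one ramp step, with min_t() replaced by a plain comparison:

    #define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)   /* 128 */

    /* One doubling step: 128 -> 256 -> 512 -> 1024, then pinned at full scale. */
    static unsigned long iowait_boost_step(unsigned long boost)
    {
            unsigned long next = boost << 1;

            return next > SCHED_CAPACITY_SCALE ? SCHED_CAPACITY_SCALE : next;
    }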
808   unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
809   unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
1984  return SCHED_CAPACITY_SCALE;  in arch_scale_freq_capacity()
2391  return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;  in cpu_bw_dl()
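In result 6, cpu_bw_dl() re-expresses the deadline class's reserved bandwidth, which uses its own fixed point (BW_SHIFT; 20 bits is assumed here), in capacity units: multiply up by the capacity scale, then shift the bandwidth bits back out.

    #define BW_SHIFT 20   /* assumed to match kernel/sched/sched.h */

    /* A 50% deadline reservation, (1UL << 19) in BW units, converts to
     * 512 capacity units: 524288 * 1024 >> 20 == 512. */
    static unsigned long dl_bw_to_capacity(unsigned long running_bw)
    {
            return (running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
    }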
789   unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
792   unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
798   #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
817   return SCHED_CAPACITY_SCALE;  in uclamp_none()
1132  sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE) {  in sysctl_sched_uclamp_handler()
1181  if (upper_bound > SCHED_CAPACITY_SCALE)  in uclamp_validate()
6665  rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;  in sched_init()
7237  .util = SCHED_CAPACITY_SCALE,  in capacity_from_percent()
7319  if (util_clamp == SCHED_CAPACITY_SCALE) {  in cpu_uclamp_print()
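Result 7 shows utilization clamping built on the same scale: the min/max sysctls default to full scale, and UCLAMP_BUCKET_DELTA splits [0..SCHED_CAPACITY_SCALE] into UCLAMP_BUCKETS equal slots. A self-contained sketch of the bucket mapping, assuming the default of 5 buckets (the count is a Kconfig choice) and a hypothetical helper name:

    /* Kernel-style rounding division for non-negative operands. */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    #define UCLAMP_BUCKETS      5    /* assumed CONFIG_UCLAMP_BUCKETS_COUNT default */
    #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

    /* Delta is 1026 / 5 = 205, so a clamp value of 600 lands in bucket 2;
     * the top bucket absorbs the remainder up to 1024. */
    static unsigned int uclamp_bucket_id_sketch(unsigned int clamp_value)
    {
            unsigned int id = clamp_value / UCLAMP_BUCKET_DELTA;

            return id < UCLAMP_BUCKETS ? id : UCLAMP_BUCKETS - 1;
    }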
3772  if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))  in util_est_dequeue()
5603  avg_load = (avg_load * SCHED_CAPACITY_SCALE) /  in find_idlest_group()
5605  runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) /  in find_idlest_group()
8059  sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;  in update_sg_lb_stats()
8354  (busiest->load_per_task * SCHED_CAPACITY_SCALE) /  in fix_small_imbalance()
8373  capa_now /= SCHED_CAPACITY_SCALE;  in fix_small_imbalance()
8384  busiest->load_per_task * SCHED_CAPACITY_SCALE) {  in fix_small_imbalance()
8388  tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /  in fix_small_imbalance()
8393  capa_move /= SCHED_CAPACITY_SCALE;  in fix_small_imbalance()
8441  load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;  in calculate_imbalance()
[all …]
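Every fair-class hit in result 8 follows one fixed-point idiom: multiply by SCHED_CAPACITY_SCALE before dividing by a capacity, so the ratio keeps ~10 bits of precision in integer math. A worked sketch with hypothetical numbers:

    /* Load 2048 on capacity 512 (e.g. a half-capacity CPU) gives
     * avg_load = 2048 * 1024 / 512 = 4096: four nominal CPUs' worth.
     * Dividing first would destroy the fractional part. */
    static unsigned long scaled_avg_load(unsigned long group_load,
                                         unsigned long group_capacity)
    {
            return (group_load * SCHED_CAPACITY_SCALE) / group_capacity;
    }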
318   # define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)  macro
596   unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
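Result 9 is the definition itself: with the shift of 10 the scale is 1024, and bits_per(SCHED_CAPACITY_SCALE) sizes the bitfields in results 6 and 9 to hold every value from 0 to 1024 inclusive, which takes 11 bits. A userspace stand-in for that helper (valid for n >= 1; other inputs are not modeled):

    /* Number of bits needed to represent n; bits_per_sketch(1024) == 11. */
    static unsigned int bits_per_sketch(unsigned long n)
    {
            unsigned int bits = 0;

            while (n) {
                    bits++;
                    n >>= 1;
            }
            return bits;
    }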
745 will be SCHED_CAPACITY_SCALE/UCLAMP_BUCKETS_COUNT. The higher the