Searched refs:SCHED_CAPACITY_SCALE (Results 1 – 6 of 6) sorted by relevance
drivers/base/arch_topology.c
   19  DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
   34  DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
   67  if (new_capacity > SCHED_CAPACITY_SCALE)    in cpu_capacity_store()
include/linux/sched/topology.h
   13  #define SCHED_CAPACITY_SCALE  (1L << SCHED_CAPACITY_SHIFT)    macro
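The definition above anchors every other hit: SCHED_CAPACITY_SHIFT expands to SCHED_FIXEDPOINT_SHIFT (10), so SCHED_CAPACITY_SCALE is 1024 and serves as the scheduler's fixed-point "1.0". The per-CPU cpu_scale and freq_scale variables from arch_topology.c both start at full scale, and products of two scale factors are shifted back down to stay in this unit. A minimal userspace sketch of that arithmetic (the CPU rating and frequency are invented for illustration, not taken from any real machine):

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT  10  /* SCHED_FIXEDPOINT_SHIFT */
    #define SCHED_CAPACITY_SCALE  (1L << SCHED_CAPACITY_SHIFT)

    int main(void)
    {
        /* Hypothetical LITTLE core rated at ~44% of the biggest core. */
        unsigned long cpu_scale = 446;
        /* Currently running at half of its maximum frequency. */
        unsigned long freq_scale = SCHED_CAPACITY_SCALE / 2;

        /* Multiply the two scale factors and renormalize: ~223. */
        unsigned long cap = (cpu_scale * freq_scale) >> SCHED_CAPACITY_SHIFT;

        printf("effective capacity: %lu of %ld\n", cap, SCHED_CAPACITY_SCALE);
        return 0;
    }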
kernel/sched/topology.c
   87  if (group->sgc->capacity != SCHED_CAPACITY_SCALE)    in sched_domain_debug_one()
  693  sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);    in init_overlap_sched_group()
  694  sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;    in init_overlap_sched_group()
  852  sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));    in get_group()
  853  sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;    in get_group()
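These topology.c hits seed sched-group capacity before any per-CPU capacities are known: a group is assumed to hold cpumask_weight() full-scale CPUs, with min_capacity set to one full CPU, and the guess is refined later by update_group_capacity() in fair.c. A tiny sketch of the seeding arithmetic (the span size is invented):

    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE  (1L << 10)

    int main(void)
    {
        unsigned long span_weight = 4;  /* CPUs covered by the group */

        /* Same seeding as get_group()/init_overlap_sched_group(). */
        unsigned long capacity = SCHED_CAPACITY_SCALE * span_weight;
        unsigned long min_capacity = SCHED_CAPACITY_SCALE;

        /* Prints 4096 and 1024. */
        printf("capacity=%lu min_capacity=%lu\n", capacity, min_capacity);
        return 0;
    }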
kernel/sched/sched.h
 1755  return SCHED_CAPACITY_SCALE;    in arch_scale_freq_capacity()
 1767  return SCHED_CAPACITY_SCALE;    in arch_scale_cpu_capacity()
 1775  return SCHED_CAPACITY_SCALE;    in arch_scale_cpu_capacity()
 2194  return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;    in cpu_bw_dl()
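The sched.h hits are the fallback arch hooks, which report full scale whenever an architecture provides no frequency or microarchitecture invariance information, plus cpu_bw_dl(), which converts deadline bandwidth from BW_SHIFT (20-bit) fixed point into capacity units. A standalone sketch of that conversion (the bandwidth value is made up for illustration):

    #include <stdio.h>

    #define BW_SHIFT              20  /* deadline bandwidth fixed point */
    #define SCHED_CAPACITY_SCALE  (1L << 10)

    /* Same conversion as cpu_bw_dl() in kernel/sched/sched.h. */
    static unsigned long bw_to_capacity(unsigned long running_bw)
    {
        return (running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
    }

    int main(void)
    {
        /* 50% of the CPU reserved by deadline tasks: 0.5 in 20-bit fixed point. */
        unsigned long running_bw = 1UL << (BW_SHIFT - 1);

        /* Prints 512, i.e. half of SCHED_CAPACITY_SCALE. */
        printf("dl bandwidth in capacity units: %lu\n", bw_to_capacity(running_bw));
        return 0;
    }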
kernel/sched/fair.c
 1501  smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);    in update_numa_stats()
 1505  DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));    in update_numa_stats()
 3700  if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))    in util_est_dequeue()
 5746  avg_load = (avg_load * SCHED_CAPACITY_SCALE) /    in find_idlest_group()
 5748  runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) /    in find_idlest_group()
 7884  sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;    in update_sg_lb_stats()
 8130  SCHED_CAPACITY_SCALE);    in check_asym_packing()
 8159  (busiest->load_per_task * SCHED_CAPACITY_SCALE) /    in fix_small_imbalance()
 8178  capa_now /= SCHED_CAPACITY_SCALE;    in fix_small_imbalance()
 8189  busiest->load_per_task * SCHED_CAPACITY_SCALE) {    in fix_small_imbalance()
[all …]
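The fair.c hits all use the scale as a normalization constant, most visibly in update_sg_lb_stats(), where raw group load is scaled up and divided by group capacity so that groups with different CPU counts or CPU types become directly comparable. A sketch of that comparison (the helper name and the load/capacity numbers are illustrative, not kernel code):

    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE  (1L << 10)

    /* Same formula as sgs->avg_load in update_sg_lb_stats(). */
    static unsigned long avg_load(unsigned long load, unsigned long capacity)
    {
        return load * SCHED_CAPACITY_SCALE / capacity;
    }

    int main(void)
    {
        /* Two full-scale CPUs carrying 1536 units of load... */
        printf("big:    %lu\n", avg_load(1536, 2 * SCHED_CAPACITY_SCALE));
        /* ...versus two 446-capacity CPUs carrying only 768 units. */
        printf("little: %lu\n", avg_load(768, 2 * 446));
        return 0;
    }

The second group prints the higher value (881 vs. 768): despite carrying half the absolute load, it is busier relative to what it can serve, which is exactly the property the load balancer needs to compare groups fairly.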
kernel/sched/core.c
 6022  rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;    in sched_init()