Lines Matching refs:cpu_map
280 static void perf_domain_debug(const struct cpumask *cpu_map, in perf_domain_debug() argument
286 printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map)); in perf_domain_debug()
347 static bool build_perf_domains(const struct cpumask *cpu_map) in build_perf_domains() argument
349 int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map); in build_perf_domains()
351 int cpu = cpumask_first(cpu_map); in build_perf_domains()
363 cpumask_pr_args(cpu_map)); in build_perf_domains()
371 cpumask_pr_args(cpu_map)); in build_perf_domains()
375 for_each_cpu(i, cpu_map) { in build_perf_domains()
389 cpumask_pr_args(cpu_map)); in build_perf_domains()
411 cpumask_pr_args(cpu_map)); in build_perf_domains()
415 perf_domain_debug(cpu_map, pd); in build_perf_domains()
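
The hits above (apparently kernel/sched/topology.c, judging by the function names) apply the standard cpumask helpers to cpu_map: cpumask_weight(), cpumask_first(), cpumask_pr_args() with the "%*pbl" printk format, and for_each_cpu(). A minimal, hypothetical sketch of that pattern follows; example_walk_cpu_map() is made up for illustration and is not the topology.c code itself:

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Hypothetical helper: print a cpu_map as a CPU list and visit each CPU in it. */
static void example_walk_cpu_map(const struct cpumask *cpu_map)
{
        int cpu;

        /* "%*pbl" with cpumask_pr_args() renders the mask as a range list, e.g. "0-3,6". */
        printk(KERN_DEBUG "cpu_map %*pbl, weight %u, first CPU %u\n",
               cpumask_pr_args(cpu_map), cpumask_weight(cpu_map),
               cpumask_first(cpu_map));

        /* for_each_cpu() iterates over every CPU set in the mask. */
        for_each_cpu(cpu, cpu_map)
                printk(KERN_DEBUG "  CPU%d is in the map\n", cpu);
}
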
1226 static void __sdt_free(const struct cpumask *cpu_map);
1227 static int __sdt_alloc(const struct cpumask *cpu_map);
1230 const struct cpumask *cpu_map) in __free_domain_allocs() argument
1241 __sdt_free(cpu_map); in __free_domain_allocs()
1249 __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) in __visit_domain_allocation_hell() argument
1253 if (__sdt_alloc(cpu_map)) in __visit_domain_allocation_hell()
1323 const struct cpumask *cpu_map, in sd_init() argument
1379 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); in sd_init()
1747 static int __sdt_alloc(const struct cpumask *cpu_map) in __sdt_alloc() argument
1771 for_each_cpu(j, cpu_map) { in __sdt_alloc()
1816 static void __sdt_free(const struct cpumask *cpu_map) in __sdt_free() argument
1824 for_each_cpu(j, cpu_map) { in __sdt_free()
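
The __sdt_alloc()/__sdt_free() hits walk cpu_map to set up and tear down per-CPU topology data. A simplified, hypothetical sketch of that allocate-or-unwind shape; example_alloc_per_cpu_data() and the 64-byte object size are invented for illustration, not the real __sdt_alloc():

#include <linux/cpumask.h>
#include <linux/slab.h>

/* Hypothetical helper: one object per CPU in cpu_map, freeing everything on failure. */
static void **example_alloc_per_cpu_data(const struct cpumask *cpu_map)
{
        void **objs;
        int cpu;

        /* kcalloc() zeroes the array, so unvisited slots stay NULL. */
        objs = kcalloc(nr_cpu_ids, sizeof(*objs), GFP_KERNEL);
        if (!objs)
                return NULL;

        for_each_cpu(cpu, cpu_map) {
                objs[cpu] = kzalloc(64, GFP_KERNEL);
                if (!objs[cpu])
                        goto unwind;
        }
        return objs;

unwind:
        for_each_cpu(cpu, cpu_map)
                kfree(objs[cpu]);       /* kfree(NULL) is a no-op */
        kfree(objs);
        return NULL;
}
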
1853 const struct cpumask *cpu_map, struct sched_domain_attr *attr, in build_sched_domain() argument
1856 struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu); in build_sched_domain()
1887 const struct cpumask *cpu_map, int cpu) in topology_span_sane() argument
1901 for_each_cpu(i, cpu_map) { in topology_span_sane()
1923 *asym_cpu_capacity_level(const struct cpumask *cpu_map) in asym_cpu_capacity_level() argument
1931 cap = arch_scale_cpu_capacity(cpumask_first(cpu_map)); in asym_cpu_capacity_level()
1933 for_each_cpu(i, cpu_map) { in asym_cpu_capacity_level()
1948 for_each_cpu(i, cpu_map) { in asym_cpu_capacity_level()
1956 for_each_cpu_and(j, tl->mask(i), cpu_map) { in asym_cpu_capacity_level()
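
asym_cpu_capacity_level() compares arch_scale_cpu_capacity() across the CPUs in cpu_map to detect asymmetric-capacity systems. A reduced, hypothetical version of that scan (the real function additionally walks the topology levels and their per-CPU masks):

#include <linux/cpumask.h>
#include <linux/sched/topology.h>

/* Hypothetical helper: true if cpu_map mixes CPUs of different compute capacity. */
static bool example_cpu_map_has_asym_capacity(const struct cpumask *cpu_map)
{
        unsigned long cap = arch_scale_cpu_capacity(cpumask_first(cpu_map));
        int cpu;

        for_each_cpu(cpu, cpu_map) {
                if (arch_scale_cpu_capacity(cpu) != cap)
                        return true;
        }
        return false;
}
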
1982 build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) in build_sched_domains() argument
1992 if (WARN_ON(cpumask_empty(cpu_map))) in build_sched_domains()
1995 alloc_state = __visit_domain_allocation_hell(&d, cpu_map); in build_sched_domains()
1999 tl_asym = asym_cpu_capacity_level(cpu_map); in build_sched_domains()
2002 for_each_cpu(i, cpu_map) { in build_sched_domains()
2013 if (WARN_ON(!topology_span_sane(tl, cpu_map, i))) in build_sched_domains()
2016 sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i); in build_sched_domains()
2022 if (cpumask_equal(cpu_map, sched_domain_span(sd))) in build_sched_domains()
2028 for_each_cpu(i, cpu_map) { in build_sched_domains()
2043 if (!cpumask_test_cpu(i, cpu_map)) in build_sched_domains()
2054 for_each_cpu(i, cpu_map) { in build_sched_domains()
2071 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); in build_sched_domains()
2076 __free_domain_allocs(&d, alloc_state, cpu_map); in build_sched_domains()
2136 int sched_init_domains(const struct cpumask *cpu_map) in sched_init_domains() argument
2149 cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN)); in sched_init_domains()
2160 static void detach_destroy_domains(const struct cpumask *cpu_map) in detach_destroy_domains() argument
2162 unsigned int cpu = cpumask_any(cpu_map); in detach_destroy_domains()
2169 for_each_cpu(i, cpu_map) in detach_destroy_domains()
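
Finally, the sched_init_domains() hit intersects cpu_map with the housekeeping mask before building domains, and detach_destroy_domains() picks an arbitrary CPU from it via cpumask_any(). A minimal, hypothetical sketch of the intersection step; example_restrict_to_housekeeping() is invented for illustration:

#include <linux/cpumask.h>
#include <linux/sched/isolation.h>

/* Hypothetical helper: restrict cpu_map to CPUs allowed to take part in domain rebuilds. */
static void example_restrict_to_housekeeping(struct cpumask *dst,
                                             const struct cpumask *cpu_map)
{
        /* dst = cpu_map & housekeeping CPUs; CPUs isolated via isolcpus= drop out. */
        cpumask_and(dst, cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN));
}
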