Lines Matching +full:performance +full:- +full:domains
1 // SPDX-License-Identifier: GPL-2.0
36 struct sched_group *group = sd->groups; in sched_domain_debug_one()
37 unsigned long flags = sd->flags; in sched_domain_debug_one()
42 printk(KERN_DEBUG "%*s domain-%d: ", level, "", level); in sched_domain_debug_one()
44 cpumask_pr_args(sched_domain_span(sd)), sd->name); in sched_domain_debug_one()
47 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); in sched_domain_debug_one()
50 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); in sched_domain_debug_one()
57 if ((meta_flags & SDF_SHARED_CHILD) && sd->child && in sched_domain_debug_one()
58 !(sd->child->flags & flag)) in sched_domain_debug_one()
62 if ((meta_flags & SDF_SHARED_PARENT) && sd->parent && in sched_domain_debug_one()
63 !(sd->parent->flags & flag)) in sched_domain_debug_one()
82 if (!(sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
92 group->sgc->id, in sched_domain_debug_one()
95 if ((sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
101 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) in sched_domain_debug_one()
102 printk(KERN_CONT " cap=%lu", group->sgc->capacity); in sched_domain_debug_one()
104 if (group == sd->groups && sd->child && in sched_domain_debug_one()
105 !cpumask_equal(sched_domain_span(sd->child), in sched_domain_debug_one()
107 printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n"); in sched_domain_debug_one()
112 group = group->next; in sched_domain_debug_one()
114 if (group != sd->groups) in sched_domain_debug_one()
117 } while (group != sd->groups); in sched_domain_debug_one()
121 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); in sched_domain_debug_one()
123 if (sd->parent && in sched_domain_debug_one()
124 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) in sched_domain_debug_one()
125 printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n"); in sched_domain_debug_one()
137 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); in sched_domain_debug()
141 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu); in sched_domain_debug()
147 sd = sd->parent; in sched_domain_debug()
175 if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) && in sd_degenerate()
176 (sd->groups != sd->groups->next)) in sd_degenerate()
180 if (sd->flags & (SD_WAKE_AFFINE)) in sd_degenerate()
189 unsigned long cflags = sd->flags, pflags = parent->flags; in sd_parent_degenerate()
198 if (parent->groups == parent->groups->next) in sd_parent_degenerate()
229 return -EPERM; in sched_energy_aware_handler()
268 tmp = pd->next; in free_pd()
279 pd = pd->next; in find_pd()
299 pd->em_pd = obj; in pd_init()
316 em_pd_nr_perf_states(pd->em_pd)); in perf_domain_debug()
317 pd = pd->next; in perf_domain_debug()
358 * - nr_pd: the number of performance domains
359 * - nr_cpus: the number of CPUs
360 * - nr_ps: the sum of the number of performance states of all performance
361 * domains (for example, on a system with 2 performance domains,
362 * with 10 performance states each, nr_ps = 2 * 10 = 20).
364 * It is generally not a good idea to use such a model in the wake-up path on
367 * with per-CPU DVFS and less than 8 performance states each, for example.
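For reference, a minimal sketch of the guard this comment describes, assuming the complexity metric is C = nr_pd * (nr_cpus + nr_ps) and that it is compared against a fixed cap (EM_MAX_COMPLEXITY in this file; the exact value below is an assumption):

/*
 * Sketch only: refuse to enable EAS when walking the Energy Model in the
 * wake-up path would be too expensive.
 */
#define EM_MAX_COMPLEXITY	2048

static bool em_too_complex(int nr_pd, int nr_cpus, int nr_ps)
{
	return nr_pd * (nr_ps + nr_cpus) > EM_MAX_COMPLEXITY;
}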
377 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains()
402 pr_warn("rd %*pbl: Disabling EAS: frequency-invariant load tracking not yet supported", in build_perf_domains()
417 gov = policy->governor; in build_perf_domains()
420 if (rd->pd) in build_perf_domains()
430 tmp->next = pd; in build_perf_domains()
434 * Count performance domains and performance states for the in build_perf_domains()
438 nr_ps += em_pd_nr_perf_states(pd->em_pd); in build_perf_domains()
450 /* Attach the new list of performance domains to the root domain. */ in build_perf_domains()
451 tmp = rd->pd; in build_perf_domains()
452 rcu_assign_pointer(rd->pd, pd); in build_perf_domains()
454 call_rcu(&tmp->rcu, destroy_perf_domain_rcu); in build_perf_domains()
460 tmp = rd->pd; in build_perf_domains()
461 rcu_assign_pointer(rd->pd, NULL); in build_perf_domains()
463 call_rcu(&tmp->rcu, destroy_perf_domain_rcu); in build_perf_domains()
475 cpupri_cleanup(&rd->cpupri); in free_rootdomain()
476 cpudl_cleanup(&rd->cpudl); in free_rootdomain()
477 free_cpumask_var(rd->dlo_mask); in free_rootdomain()
478 free_cpumask_var(rd->rto_mask); in free_rootdomain()
479 free_cpumask_var(rd->online); in free_rootdomain()
480 free_cpumask_var(rd->span); in free_rootdomain()
481 free_pd(rd->pd); in free_rootdomain()
492 if (rq->rd) { in rq_attach_root()
493 old_rd = rq->rd; in rq_attach_root()
495 if (cpumask_test_cpu(rq->cpu, old_rd->online)) in rq_attach_root()
498 cpumask_clear_cpu(rq->cpu, old_rd->span); in rq_attach_root()
505 if (!atomic_dec_and_test(&old_rd->refcount)) in rq_attach_root()
509 atomic_inc(&rd->refcount); in rq_attach_root()
510 rq->rd = rd; in rq_attach_root()
512 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
513 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) in rq_attach_root()
519 call_rcu(&old_rd->rcu, free_rootdomain); in rq_attach_root()
524 atomic_inc(&rd->refcount); in sched_get_rd()
529 if (!atomic_dec_and_test(&rd->refcount)) in sched_put_rd()
532 call_rcu(&rd->rcu, free_rootdomain); in sched_put_rd()
537 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) in init_rootdomain()
539 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) in init_rootdomain()
541 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) in init_rootdomain()
543 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) in init_rootdomain()
547 rd->rto_cpu = -1; in init_rootdomain()
548 raw_spin_lock_init(&rd->rto_lock); in init_rootdomain()
549 rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func); in init_rootdomain()
552 rd->visit_gen = 0; in init_rootdomain()
553 init_dl_bw(&rd->dl_bw); in init_rootdomain()
554 if (cpudl_init(&rd->cpudl) != 0) in init_rootdomain()
557 if (cpupri_init(&rd->cpupri) != 0) in init_rootdomain()
562 cpudl_cleanup(&rd->cpudl); in init_rootdomain()
564 free_cpumask_var(rd->rto_mask); in init_rootdomain()
566 free_cpumask_var(rd->dlo_mask); in init_rootdomain()
568 free_cpumask_var(rd->online); in init_rootdomain()
570 free_cpumask_var(rd->span); in init_rootdomain()
572 return -ENOMEM; in init_rootdomain()
576 * By default the system creates a single root-domain with all CPUs as
613 tmp = sg->next; in free_sched_groups()
615 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) in free_sched_groups()
616 kfree(sg->sgc); in free_sched_groups()
618 if (atomic_dec_and_test(&sg->ref)) in free_sched_groups()
631 free_sched_groups(sd->groups, 1); in destroy_sched_domain()
633 if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) in destroy_sched_domain()
634 kfree(sd->shared); in destroy_sched_domain()
643 struct sched_domain *parent = sd->parent; in destroy_sched_domains_rcu()
652 call_rcu(&sd->rcu, destroy_sched_domains_rcu); in destroy_sched_domains()
684 sds = sd->shared; in update_top_cache_domain()
712 /* Remove the sched domains which do not contribute to scheduling. */ in cpu_attach_domain()
714 struct sched_domain *parent = tmp->parent; in cpu_attach_domain()
719 tmp->parent = parent->parent; in cpu_attach_domain()
720 if (parent->parent) in cpu_attach_domain()
721 parent->parent->child = tmp; in cpu_attach_domain()
727 if (parent->flags & SD_PREFER_SIBLING) in cpu_attach_domain()
728 tmp->flags |= SD_PREFER_SIBLING; in cpu_attach_domain()
731 tmp = tmp->parent; in cpu_attach_domain()
736 sd = sd->parent; in cpu_attach_domain()
739 struct sched_group *sg = sd->groups; in cpu_attach_domain()
747 sg->flags = 0; in cpu_attach_domain()
748 } while (sg != sd->groups); in cpu_attach_domain()
750 sd->child = NULL; in cpu_attach_domain()
757 tmp = rq->sd; in cpu_attach_domain()
758 rcu_assign_pointer(rq->sd, sd); in cpu_attach_domain()
795 * Given a node-distance table, for example:
805 * 0 ----- 1
809 * 3 ----- 2
811 * We want to construct domains and groups to represent this. The way we go
812 * about doing this is to build the domains on 'hops'. For each NUMA level we
817 * NUMA-2 0-3 0-3 0-3 0-3
818 * groups: {0-1,3},{1-3} {0-2},{0,2-3} {1-3},{0-1,3} {0,2-3},{0-2}
820 * NUMA-1 0-1,3 0-2 1-3 0,2-3
823 * NUMA-0 0 1 2 3
828 * represented multiple times -- hence the "overlap" naming for this part of
832 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
836 * - the first group of each domain is its child domain; this
837 * gets us the first 0-1,3
838 * - the only uncovered node is 2, whose child domain is 1-3.

841 * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both
842 * groups include the CPUs of Node-0, while those CPUs would not in fact ever
843 * end up at those groups (they would end up in group: 0-1,3).
866 * 0 ----- 1
870 * 2 ----- 3
878 * NUMA-2 0-3 0-3
879 * groups: {0-2},{1-3} {1-3},{0-2}
881 * NUMA-1 0-2 0-3 0-3 1-3
883 * NUMA-0 0 1 2 3
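The hop levels in these diagrams come straight from the firmware node-distance table. As a hedged sketch (not the kernel's implementation, which is the O(nr_nodes^2) deduplicating selection sort referenced near line 1816 below), the number of NUMA levels is simply the number of distinct values in that table:

/* Sketch: one NUMA level per distinct node_distance() value. */
static int count_numa_levels(void)
{
	int distances[16];	/* assumption: only a handful of distinct distances */
	int i, j, k, nr_levels = 0;

	for_each_online_node(i) {
		for_each_online_node(j) {
			int dist = node_distance(i, j);

			for (k = 0; k < nr_levels; k++)
				if (distances[k] == dist)
					break;
			if (k == nr_levels && nr_levels < ARRAY_SIZE(distances))
				distances[nr_levels++] = dist;
		}
	}

	return nr_levels;
}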
901 struct sd_data *sdd = sd->private; in build_balance_mask()
908 sibling = *per_cpu_ptr(sdd->sd, i); in build_balance_mask()
915 if (!sibling->child) in build_balance_mask()
919 if (!cpumask_equal(sg_span, sched_domain_span(sibling->child))) in build_balance_mask()
930 * XXX: This creates per-node group entries; since the load-balancer will
931 * immediately access remote memory to construct this group's load-balance
947 if (sd->child) { in build_group_from_child_sched_domain()
948 cpumask_copy(sg_span, sched_domain_span(sd->child)); in build_group_from_child_sched_domain()
949 sg->flags = sd->child->flags; in build_group_from_child_sched_domain()
954 atomic_inc(&sg->ref); in build_group_from_child_sched_domain()
962 struct sd_data *sdd = sd->private; in init_overlap_sched_group()
969 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in init_overlap_sched_group()
970 if (atomic_inc_return(&sg->sgc->ref) == 1) in init_overlap_sched_group()
976 * Initialize sgc->capacity such that even if we mess up the in init_overlap_sched_group()
977 * domains and no possible iteration will get us here, we won't in init_overlap_sched_group()
981 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); in init_overlap_sched_group()
982 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
983 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
993 while (sibling->child && in find_descended_sibling()
994 !cpumask_subset(sched_domain_span(sibling->child), in find_descended_sibling()
996 sibling = sibling->child; in find_descended_sibling()
1003 while (sibling->child && in find_descended_sibling()
1004 cpumask_equal(sched_domain_span(sibling->child), in find_descended_sibling()
1006 sibling = sibling->child; in find_descended_sibling()
1017 struct sd_data *sdd = sd->private; in build_overlap_sched_groups()
1029 sibling = *per_cpu_ptr(sdd->sd, i); in build_overlap_sched_groups()
1033 * domain tree is of unequal depth, make sure to skip domains in build_overlap_sched_groups()
1038 * Domains should always include the CPU they're built on, so in build_overlap_sched_groups()
1059 * 0 --- 1 --- 2 --- 3 in build_overlap_sched_groups()
1061 * NUMA-3 0-3 N/A N/A 0-3 in build_overlap_sched_groups()
1062 * groups: {0-2},{1-3} {1-3},{0-2} in build_overlap_sched_groups()
1064 * NUMA-2 0-2 0-3 0-3 1-3 in build_overlap_sched_groups()
1065 * groups: {0-1},{1-3} {0-2},{2-3} {1-3},{0-1} {2-3},{0-2} in build_overlap_sched_groups()
1067 * NUMA-1 0-1 0-2 1-3 2-3 in build_overlap_sched_groups()
1070 * NUMA-0 0 1 2 3 in build_overlap_sched_groups()
1072 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the in build_overlap_sched_groups()
1075 if (sibling->child && in build_overlap_sched_groups()
1076 !cpumask_subset(sched_domain_span(sibling->child), span)) in build_overlap_sched_groups()
1091 last->next = sg; in build_overlap_sched_groups()
1093 last->next = first; in build_overlap_sched_groups()
1095 sd->groups = first; in build_overlap_sched_groups()
1102 return -ENOMEM; in build_overlap_sched_groups()
1107 * Package topology (also see the load-balance blurb in fair.c)
1112 * - Simultaneous multithreading (SMT)
1113 * - Multi-Core Cache (MC)
1114 * - Package (DIE)
1120 * sched_domain -> sched_group -> sched_group_capacity
1122 * `-' `-'
1124 * The sched_domains are per-CPU and have a two way link (parent & child) and
1128 * denoting the domains of the level below (or individual CPUs in case of the
1140 * - or -
1142 * DIE 0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
1143 * MC 0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
1144 * SMT 0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
1157 * - The first is the balance_cpu (see should_we_balance() and the
1158 * load-balance blurb in fair.c); for each group we only want 1 CPU to
1161 * - The second is the sched_group_capacity; we want all identical groups
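The SMT/MC/DIE levels above are described by a table of struct sched_domain_topology_level entries that is walked bottom-up when building domains. The default table in this file looks roughly like the sketch below (exact contents depend on kernel version and config); architectures can install their own table via set_sched_topology():

static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};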
1179 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in get_group()
1180 struct sched_domain *child = sd->child; in get_group()
1187 sg = *per_cpu_ptr(sdd->sg, cpu); in get_group()
1188 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
1191 already_visited = atomic_inc_return(&sg->ref) > 1; in get_group()
1193 WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)); in get_group()
1202 sg->flags = child->flags; in get_group()
1208 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); in get_group()
1209 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in get_group()
1210 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in get_group()
1217 * covered by the given span, will set each group's ->cpumask correctly,
1218 * and will initialize their ->sgc.
1226 struct sd_data *sdd = sd->private; in build_sched_groups()
1249 last->next = sg; in build_sched_groups()
1252 last->next = first; in build_sched_groups()
1253 sd->groups = first; in build_sched_groups()
1270 struct sched_group *sg = sd->groups; in init_sched_groups_capacity()
1275 int cpu, max_cpu = -1; in init_sched_groups_capacity()
1277 sg->group_weight = cpumask_weight(sched_group_span(sg)); in init_sched_groups_capacity()
1279 if (!(sd->flags & SD_ASYM_PACKING)) in init_sched_groups_capacity()
1288 sg->asym_prefer_cpu = max_cpu; in init_sched_groups_capacity()
1291 sg = sg->next; in init_sched_groups_capacity()
1292 } while (sg != sd->groups); in init_sched_groups_capacity()
1317 #define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus)
1363 if (capacity == entry->capacity) in asym_cpu_capacity_update_data()
1370 entry->capacity = capacity; in asym_cpu_capacity_update_data()
1371 list_add(&entry->link, &asym_cap_list); in asym_cpu_capacity_update_data()
1377 * Build-up/update list of CPUs grouped by their capacities
1378 * An update requires explicit request to rebuild sched domains
1394 list_del(&entry->link); in asym_cpu_capacity_scan()
1405 list_del(&entry->link); in asym_cpu_capacity_scan()
1411 * Initializers for schedule domains
1412 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
1415 static int default_relax_domain_level = -1;
1432 if (!attr || attr->relax_domain_level < 0) { in set_domain_attribute()
1437 request = attr->relax_domain_level; in set_domain_attribute()
1439 if (sd->level > request) { in set_domain_attribute()
1441 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); in set_domain_attribute()
1453 if (!atomic_read(&d->rd->refcount)) in __free_domain_allocs()
1454 free_rootdomain(&d->rd->rcu); in __free_domain_allocs()
1457 free_percpu(d->sd); in __free_domain_allocs()
1474 d->sd = alloc_percpu(struct sched_domain *); in __visit_domain_allocation_hell()
1475 if (!d->sd) in __visit_domain_allocation_hell()
1477 d->rd = alloc_rootdomain(); in __visit_domain_allocation_hell()
1478 if (!d->rd) in __visit_domain_allocation_hell()
1491 struct sd_data *sdd = sd->private; in claim_allocations()
1493 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); in claim_allocations()
1494 *per_cpu_ptr(sdd->sd, cpu) = NULL; in claim_allocations()
1496 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) in claim_allocations()
1497 *per_cpu_ptr(sdd->sds, cpu) = NULL; in claim_allocations()
1499 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) in claim_allocations()
1500 *per_cpu_ptr(sdd->sg, cpu) = NULL; in claim_allocations()
1502 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) in claim_allocations()
1503 *per_cpu_ptr(sdd->sgc, cpu) = NULL; in claim_allocations()
1524 * SD_SHARE_CPUCAPACITY - describes SMT topologies
1525 * SD_SHARE_PKG_RESOURCES - describes shared caches
1526 * SD_NUMA - describes NUMA topologies
1531 * SD_ASYM_PACKING - describes SMT quirks
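These flags reach sd_init() through the per-level tl->sd_flags callbacks, called as (*tl->sd_flags)() below. A hedged sketch of how the SMT and MC levels typically supply them:

/* Sketch of the per-level flag callbacks referenced by tl->sd_flags. */
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}

static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}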
1544 struct sd_data *sdd = &tl->data; in sd_init()
1545 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in sd_init()
1553 sched_domains_curr_level = tl->numa_level; in sd_init()
1556 sd_weight = cpumask_weight(tl->mask(cpu)); in sd_init()
1558 if (tl->sd_flags) in sd_init()
1559 sd_flags = (*tl->sd_flags)(); in sd_init()
1591 .name = tl->name, in sd_init()
1596 cpumask_and(sd_span, cpu_map, tl->mask(cpu)); in sd_init()
1599 sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map); in sd_init()
1601 WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) == in sd_init()
1609 if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child) in sd_init()
1610 sd->child->flags &= ~SD_PREFER_SIBLING; in sd_init()
1612 if (sd->flags & SD_SHARE_CPUCAPACITY) { in sd_init()
1613 sd->imbalance_pct = 110; in sd_init()
1615 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { in sd_init()
1616 sd->imbalance_pct = 117; in sd_init()
1617 sd->cache_nice_tries = 1; in sd_init()
1620 } else if (sd->flags & SD_NUMA) { in sd_init()
1621 sd->cache_nice_tries = 2; in sd_init()
1623 sd->flags &= ~SD_PREFER_SIBLING; in sd_init()
1624 sd->flags |= SD_SERIALIZE; in sd_init()
1625 if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) { in sd_init()
1626 sd->flags &= ~(SD_BALANCE_EXEC | in sd_init()
1633 sd->cache_nice_tries = 1; in sd_init()
1640 if (sd->flags & SD_SHARE_PKG_RESOURCES) { in sd_init()
1641 sd->shared = *per_cpu_ptr(sdd->sds, sd_id); in sd_init()
1642 atomic_inc(&sd->shared->ref); in sd_init()
1643 atomic_set(&sd->shared->nr_busy_cpus, sd_weight); in sd_init()
1646 sd->private = sdd; in sd_init()
1652 * Topology list, bottom-up.
1675 for (tl = sched_domain_topology; tl->mask; tl++)
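A minimal usage sketch of this iterator, just counting whatever topology levels are currently installed:

/* Sketch: walk the installed topology table, bottom level first. */
static int nr_topology_levels(void)
{
	struct sched_domain_topology_level *tl;
	int n = 0;

	for_each_sd_topology(tl)
		n++;

	return n;
}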
1761 * - If the maximum distance between any nodes is 1 hop, the system
1763 * - If for two nodes A and B, located N > 1 hops away from each other,
1816 * O(nr_nodes^2) deduplicating selection sort -- in order to find the in sched_init_numa()
1899 sched_numa_warn("Node-distance not symmetric"); in sched_init_numa()
1950 WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]); in sched_init_numa()
2039 * sched_numa_find_closest() - given the NUMA topology, find the cpu
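A hedged usage sketch, assuming the signature in this file is sched_numa_find_closest(const struct cpumask *cpus, int cpu) and that nr_cpu_ids is returned when nothing suitable is found; pick_nearby_cpu() itself is hypothetical:

/* Sketch: prefer a CPU from 'allowed' that is NUMA-close to 'cpu'. */
static int pick_nearby_cpu(const struct cpumask *allowed, int cpu)
{
	int target = sched_numa_find_closest(allowed, cpu);

	if (target >= nr_cpu_ids)
		target = cpumask_any(allowed);	/* fall back to any allowed CPU */

	return target;
}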
2078 struct sd_data *sdd = &tl->data; in __sdt_alloc()
2080 sdd->sd = alloc_percpu(struct sched_domain *); in __sdt_alloc()
2081 if (!sdd->sd) in __sdt_alloc()
2082 return -ENOMEM; in __sdt_alloc()
2084 sdd->sds = alloc_percpu(struct sched_domain_shared *); in __sdt_alloc()
2085 if (!sdd->sds) in __sdt_alloc()
2086 return -ENOMEM; in __sdt_alloc()
2088 sdd->sg = alloc_percpu(struct sched_group *); in __sdt_alloc()
2089 if (!sdd->sg) in __sdt_alloc()
2090 return -ENOMEM; in __sdt_alloc()
2092 sdd->sgc = alloc_percpu(struct sched_group_capacity *); in __sdt_alloc()
2093 if (!sdd->sgc) in __sdt_alloc()
2094 return -ENOMEM; in __sdt_alloc()
2105 return -ENOMEM; in __sdt_alloc()
2107 *per_cpu_ptr(sdd->sd, j) = sd; in __sdt_alloc()
2112 return -ENOMEM; in __sdt_alloc()
2114 *per_cpu_ptr(sdd->sds, j) = sds; in __sdt_alloc()
2119 return -ENOMEM; in __sdt_alloc()
2121 sg->next = sg; in __sdt_alloc()
2123 *per_cpu_ptr(sdd->sg, j) = sg; in __sdt_alloc()
2128 return -ENOMEM; in __sdt_alloc()
2131 sgc->id = j; in __sdt_alloc()
2134 *per_cpu_ptr(sdd->sgc, j) = sgc; in __sdt_alloc()
2147 struct sd_data *sdd = &tl->data; in __sdt_free()
2152 if (sdd->sd) { in __sdt_free()
2153 sd = *per_cpu_ptr(sdd->sd, j); in __sdt_free()
2154 if (sd && (sd->flags & SD_OVERLAP)) in __sdt_free()
2155 free_sched_groups(sd->groups, 0); in __sdt_free()
2156 kfree(*per_cpu_ptr(sdd->sd, j)); in __sdt_free()
2159 if (sdd->sds) in __sdt_free()
2160 kfree(*per_cpu_ptr(sdd->sds, j)); in __sdt_free()
2161 if (sdd->sg) in __sdt_free()
2162 kfree(*per_cpu_ptr(sdd->sg, j)); in __sdt_free()
2163 if (sdd->sgc) in __sdt_free()
2164 kfree(*per_cpu_ptr(sdd->sgc, j)); in __sdt_free()
2166 free_percpu(sdd->sd); in __sdt_free()
2167 sdd->sd = NULL; in __sdt_free()
2168 free_percpu(sdd->sds); in __sdt_free()
2169 sdd->sds = NULL; in __sdt_free()
2170 free_percpu(sdd->sg); in __sdt_free()
2171 sdd->sg = NULL; in __sdt_free()
2172 free_percpu(sdd->sgc); in __sdt_free()
2173 sdd->sgc = NULL; in __sdt_free()
2184 sd->level = child->level + 1; in build_sched_domain()
2185 sched_domain_level_max = max(sched_domain_level_max, sd->level); in build_sched_domain()
2186 child->parent = sd; in build_sched_domain()
2193 child->name, sd->name); in build_sched_domain()
2209 * any two given CPUs at this (non-NUMA) topology level.
2217 if (tl->flags & SDTL_OVERLAP) in topology_span_sane()
2221 * Non-NUMA levels cannot partially overlap - they must be either in topology_span_sane()
2223 * breaking the sched_group lists - i.e. a later get_group() pass in topology_span_sane()
2235 if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) && in topology_span_sane()
2236 cpumask_intersects(tl->mask(cpu), tl->mask(i))) in topology_span_sane()
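The invariant being checked can be stated as a tiny predicate; the helper below is hypothetical, not part of the file:

/* Two masks at a non-NUMA level must be either equal or disjoint. */
static bool non_numa_masks_sane(const struct cpumask *a, const struct cpumask *b)
{
	return cpumask_equal(a, b) || !cpumask_intersects(a, b);
}

For example, MC masks 0-3 and 4-7 pass, while 0-3 and 2-5 would trip the check above.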
2244 * Build sched domains for a given set of CPUs and attach the sched domains
2254 int i, ret = -ENOMEM; in build_sched_domains()
2264 /* Set up domains for CPUs specified by the cpu_map: */ in build_sched_domains()
2276 has_asym |= sd->flags & SD_ASYM_CPUCAPACITY; in build_sched_domains()
2280 if (tl->flags & SDTL_OVERLAP) in build_sched_domains()
2281 sd->flags |= SD_OVERLAP; in build_sched_domains()
2287 /* Build the groups for the domains */ in build_sched_domains()
2289 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
2290 sd->span_weight = cpumask_weight(sched_domain_span(sd)); in build_sched_domains()
2291 if (sd->flags & SD_OVERLAP) { in build_sched_domains()
2309 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
2310 struct sched_domain *child = sd->child; in build_sched_domains()
2312 if (!(sd->flags & SD_SHARE_PKG_RESOURCES) && child && in build_sched_domains()
2313 (child->flags & SD_SHARE_PKG_RESOURCES)) { in build_sched_domains()
2320 * arbitrary cutoff based on two factors -- SMT and in build_sched_domains()
2321 * memory channels. For SMT-2, the intent is to in build_sched_domains()
2323 * SMT-4 or SMT-8 *may* benefit from a different in build_sched_domains()
2337 nr_llcs = sd->span_weight / child->span_weight; in build_sched_domains()
2339 imb = sd->span_weight >> 3; in build_sched_domains()
2343 sd->imb_numa_nr = imb; in build_sched_domains()
2346 top_p = sd->parent; in build_sched_domains()
2347 while (top_p && !(top_p->flags & SD_NUMA)) { in build_sched_domains()
2348 top_p = top_p->parent; in build_sched_domains()
2350 imb_span = top_p ? top_p->span_weight : sd->span_weight; in build_sched_domains()
2352 int factor = max(1U, (sd->span_weight / imb_span)); in build_sched_domains()
2354 sd->imb_numa_nr = imb * factor; in build_sched_domains()
2360 for (i = nr_cpumask_bits-1; i >= 0; i--) { in build_sched_domains()
2364 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
2370 /* Attach the domains */ in build_sched_domains()
2377 if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity)) in build_sched_domains()
2378 WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig); in build_sched_domains()
2389 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); in build_sched_domains()
2399 /* Current sched domains: */
2402 /* Number of sched domains in 'doms_cur': */
2405 /* Attributes of custom domains in 'doms_cur' */
2451 * Set up scheduler domains and groups. For now this just excludes isolated
2475 * Detach sched domains from a group of CPUs specified in cpu_map
2510 * Partition sched domains as specified by the 'ndoms_new'
2519 * current 'doms_cur' domains and in the new 'doms_new', we can leave
2527 * 'fallback_doms', it also forces the domains to be rebuilt.
2530 * ndoms_new == 0 is a special case for destroying existing domains,
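For context, a hedged sketch of how a caller (cpusets being the main user) hands over a new set of domains; doms_new comes from alloc_sched_domains() and ownership passes to the scheduler. The masks and the wrapper function are made up for illustration:

/* Sketch: rebuild scheduling domains over two disjoint CPU sets. */
static void example_repartition(const struct cpumask *set_a,
				const struct cpumask *set_b)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms)
		return;

	cpumask_copy(doms[0], set_a);
	cpumask_copy(doms[1], set_b);

	/* Ownership of 'doms' passes to the scheduler. */
	partition_sched_domains(2, doms, NULL);
}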
2563 /* Destroy deleted domains: */ in partition_sched_domains_locked()
2572 * its dl_bw->total_bw needs to be cleared. It in partition_sched_domains_locked()
2576 rd = cpu_rq(cpumask_any(doms_cur[i]))->rd; in partition_sched_domains_locked()
2581 /* No match - a current sched domain not in new doms_new[] */ in partition_sched_domains_locked()
2595 /* Build new domains: */ in partition_sched_domains_locked()
2602 /* No match - add a new doms_new */ in partition_sched_domains_locked()
2609 /* Build perf. domains: */ in partition_sched_domains_locked()
2613 cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) { in partition_sched_domains_locked()
2618 /* No match - add perf. domains for a new rd */ in partition_sched_domains_locked()
2626 /* Remember the new sched domains: */ in partition_sched_domains_locked()