Lines Matching +full:performance +full:- +full:domains

1 // SPDX-License-Identifier: GPL-2.0
37 struct sched_group *group = sd->groups; in sched_domain_debug_one()
38 unsigned long flags = sd->flags; in sched_domain_debug_one()
43 printk(KERN_DEBUG "%*s domain-%d: ", level, "", level); in sched_domain_debug_one()
45 cpumask_pr_args(sched_domain_span(sd)), sd->name); in sched_domain_debug_one()
48 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); in sched_domain_debug_one()
51 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); in sched_domain_debug_one()
58 if ((meta_flags & SDF_SHARED_CHILD) && sd->child && in sched_domain_debug_one()
59 !(sd->child->flags & flag)) in sched_domain_debug_one()
63 if ((meta_flags & SDF_SHARED_PARENT) && sd->parent && in sched_domain_debug_one()
64 !(sd->parent->flags & flag)) in sched_domain_debug_one()
83 if (!(sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
93 group->sgc->id, in sched_domain_debug_one()
96 if ((sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
102 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) in sched_domain_debug_one()
103 printk(KERN_CONT " cap=%lu", group->sgc->capacity); in sched_domain_debug_one()
105 if (group == sd->groups && sd->child && in sched_domain_debug_one()
106 !cpumask_equal(sched_domain_span(sd->child), in sched_domain_debug_one()
108 printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n"); in sched_domain_debug_one()
113 group = group->next; in sched_domain_debug_one()
115 if (group != sd->groups) in sched_domain_debug_one()
118 } while (group != sd->groups); in sched_domain_debug_one()
122 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); in sched_domain_debug_one()
124 if (sd->parent && in sched_domain_debug_one()
125 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) in sched_domain_debug_one()
126 printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n"); in sched_domain_debug_one()
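The SDF_SHARED_CHILD / SDF_SHARED_PARENT checks inside sched_domain_debug_one() above encode a simple invariant: a flag tagged as shared with the child must also be set on the child domain, and one tagged as shared with the parent must be set on the parent. A standalone sketch of that rule, using toy structs and made-up flag values rather than the kernel's sched_domain and SD_* definitions:

/* Toy check of the "shared with child/parent" flag rules (not kernel code). */
#include <stdio.h>

#define TOY_FLAG_A        0x1   /* stand-in for one sched_domain flag     */
#define TOY_SHARED_CHILD  0x1   /* stand-in for the SDF_SHARED_CHILD meta  */
#define TOY_SHARED_PARENT 0x2   /* stand-in for the SDF_SHARED_PARENT meta */

struct toy_sd {
	unsigned long flags;
	struct toy_sd *parent, *child;
};

static void check_flag(const struct toy_sd *sd, unsigned long flag,
		       unsigned long meta_flags)
{
	if (!(sd->flags & flag))
		return;

	if ((meta_flags & TOY_SHARED_CHILD) && sd->child &&
	    !(sd->child->flags & flag))
		printf("ERROR: flag %#lx set on domain but not on child\n", flag);

	if ((meta_flags & TOY_SHARED_PARENT) && sd->parent &&
	    !(sd->parent->flags & flag))
		printf("ERROR: flag %#lx set on domain but not on parent\n", flag);
}

int main(void)
{
	struct toy_sd child = { .flags = 0 };
	struct toy_sd sd    = { .flags = TOY_FLAG_A, .child = &child };

	child.parent = &sd;
	check_flag(&sd, TOY_FLAG_A, TOY_SHARED_CHILD);	/* reports the error */
	return 0;
}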
138 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); in sched_domain_debug()
142 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu); in sched_domain_debug()
148 sd = sd->parent; in sched_domain_debug()
176 if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) && in sd_degenerate()
177 (sd->groups != sd->groups->next)) in sd_degenerate()
181 if (sd->flags & (SD_WAKE_AFFINE)) in sd_degenerate()
190 unsigned long cflags = sd->flags, pflags = parent->flags; in sd_parent_degenerate()
199 if (parent->groups == parent->groups->next) in sd_parent_degenerate()
230 return -EPERM; in sched_energy_aware_handler()
248 tmp = pd->next; in free_pd()
259 pd = pd->next; in find_pd()
279 pd->em_pd = obj; in pd_init()
296 em_pd_nr_perf_states(pd->em_pd)); in perf_domain_debug()
297 pd = pd->next; in perf_domain_debug()
338 * - nr_pd: the number of performance domains
339 * - nr_cpus: the number of CPUs
340 * - nr_ps: the sum of the number of performance states of all performance
341 * domains (for example, on a system with 2 performance domains,
342 * with 10 performance states each, nr_ps = 2 * 10 = 20).
344 * It is generally not a good idea to use such a model in the wake-up path on
347 * with per-CPU DVFS and less than 8 performance states each, for example.
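The complexity note above feeds the accounting done further down in build_perf_domains(), where nr_pd and nr_ps are summed while walking the list of performance domains. A minimal standalone sketch of that walk, with a toy struct standing in for the kernel's perf_domain and em_perf_domain:

/* Simplified model (not the kernel structs): count perf domains and states. */
#include <stdio.h>

struct toy_pd {
	int nr_perf_states;	/* stand-in for em_pd_nr_perf_states(pd->em_pd) */
	struct toy_pd *next;
};

int main(void)
{
	struct toy_pd big    = { .nr_perf_states = 10, .next = NULL };
	struct toy_pd little = { .nr_perf_states = 10, .next = &big };
	int nr_pd = 0, nr_ps = 0;

	for (struct toy_pd *pd = &little; pd; pd = pd->next) {
		nr_pd++;
		nr_ps += pd->nr_perf_states;
	}

	/* Matches the example in the comment: 2 domains * 10 states = 20. */
	printf("nr_pd=%d nr_ps=%d\n", nr_pd, nr_ps);
	return 0;
}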
357 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains()
382 pr_warn("rd %*pbl: Disabling EAS: frequency-invariant load tracking not yet supported", in build_perf_domains()
397 gov = policy->governor; in build_perf_domains()
400 if (rd->pd) in build_perf_domains()
410 tmp->next = pd; in build_perf_domains()
414 * Count performance domains and performance states for the in build_perf_domains()
418 nr_ps += em_pd_nr_perf_states(pd->em_pd); in build_perf_domains()
430 /* Attach the new list of performance domains to the root domain. */ in build_perf_domains()
431 tmp = rd->pd; in build_perf_domains()
432 rcu_assign_pointer(rd->pd, pd); in build_perf_domains()
434 call_rcu(&tmp->rcu, destroy_perf_domain_rcu); in build_perf_domains()
440 tmp = rd->pd; in build_perf_domains()
441 rcu_assign_pointer(rd->pd, NULL); in build_perf_domains()
443 call_rcu(&tmp->rcu, destroy_perf_domain_rcu); in build_perf_domains()
455 cpupri_cleanup(&rd->cpupri); in free_rootdomain()
456 cpudl_cleanup(&rd->cpudl); in free_rootdomain()
457 free_cpumask_var(rd->dlo_mask); in free_rootdomain()
458 free_cpumask_var(rd->rto_mask); in free_rootdomain()
459 free_cpumask_var(rd->online); in free_rootdomain()
460 free_cpumask_var(rd->span); in free_rootdomain()
461 free_pd(rd->pd); in free_rootdomain()
472 if (rq->rd) { in rq_attach_root()
473 old_rd = rq->rd; in rq_attach_root()
475 if (cpumask_test_cpu(rq->cpu, old_rd->online)) in rq_attach_root()
478 cpumask_clear_cpu(rq->cpu, old_rd->span); in rq_attach_root()
485 if (!atomic_dec_and_test(&old_rd->refcount)) in rq_attach_root()
489 atomic_inc(&rd->refcount); in rq_attach_root()
490 rq->rd = rd; in rq_attach_root()
492 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
493 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) in rq_attach_root()
499 call_rcu(&old_rd->rcu, free_rootdomain); in rq_attach_root()
504 atomic_inc(&rd->refcount); in sched_get_rd()
509 if (!atomic_dec_and_test(&rd->refcount)) in sched_put_rd()
512 call_rcu(&rd->rcu, free_rootdomain); in sched_put_rd()
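The rq_attach_root()/sched_get_rd()/sched_put_rd() fragments above are plain reference counting: attaching takes a reference on the new root domain, drops one on the old, and the last drop triggers the (RCU-deferred) free. A toy model of that life cycle, with plain integers instead of atomic_t and RCU omitted entirely:

/* Toy refcounting model of rq_attach_root() (not kernel code). */
#include <stdio.h>
#include <stdlib.h>

struct toy_rd {
	int refcount;
};

struct toy_rq {
	struct toy_rd *rd;
};

static void toy_attach_root(struct toy_rq *rq, struct toy_rd *rd)
{
	struct toy_rd *old_rd = rq->rd;

	rd->refcount++;
	rq->rd = rd;

	if (old_rd && --old_rd->refcount == 0) {
		printf("freeing old root domain\n");
		free(old_rd);
	}
}

int main(void)
{
	struct toy_rd *def_rd = calloc(1, sizeof(*def_rd));
	struct toy_rd *new_rd = calloc(1, sizeof(*new_rd));
	struct toy_rq rq0 = { 0 }, rq1 = { 0 };

	/* Both runqueues start on the single default root domain ... */
	toy_attach_root(&rq0, def_rd);
	toy_attach_root(&rq1, def_rd);

	/* ... then move to a new one; the default is freed on the last detach. */
	toy_attach_root(&rq0, new_rd);
	toy_attach_root(&rq1, new_rd);

	return 0;
}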
517 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) in init_rootdomain()
519 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) in init_rootdomain()
521 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) in init_rootdomain()
523 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) in init_rootdomain()
527 rd->rto_cpu = -1; in init_rootdomain()
528 raw_spin_lock_init(&rd->rto_lock); in init_rootdomain()
529 init_irq_work(&rd->rto_push_work, rto_push_irq_work_func); in init_rootdomain()
532 rd->visit_gen = 0; in init_rootdomain()
533 init_dl_bw(&rd->dl_bw); in init_rootdomain()
534 if (cpudl_init(&rd->cpudl) != 0) in init_rootdomain()
537 if (cpupri_init(&rd->cpupri) != 0) in init_rootdomain()
542 cpudl_cleanup(&rd->cpudl); in init_rootdomain()
544 free_cpumask_var(rd->rto_mask); in init_rootdomain()
546 free_cpumask_var(rd->dlo_mask); in init_rootdomain()
548 free_cpumask_var(rd->online); in init_rootdomain()
550 free_cpumask_var(rd->span); in init_rootdomain()
552 return -ENOMEM; in init_rootdomain()
556 * By default the system creates a single root-domain with all CPUs as
593 tmp = sg->next; in free_sched_groups()
595 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) in free_sched_groups()
596 kfree(sg->sgc); in free_sched_groups()
598 if (atomic_dec_and_test(&sg->ref)) in free_sched_groups()
611 free_sched_groups(sd->groups, 1); in destroy_sched_domain()
613 if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) in destroy_sched_domain()
614 kfree(sd->shared); in destroy_sched_domain()
623 struct sched_domain *parent = sd->parent; in destroy_sched_domains_rcu()
632 call_rcu(&sd->rcu, destroy_sched_domains_rcu); in destroy_sched_domains()
664 sds = sd->shared; in update_top_cache_domain()
693 /* Remove the sched domains which do not contribute to scheduling. */ in cpu_attach_domain()
695 struct sched_domain *parent = tmp->parent; in cpu_attach_domain()
700 tmp->parent = parent->parent; in cpu_attach_domain()
701 if (parent->parent) in cpu_attach_domain()
702 parent->parent->child = tmp; in cpu_attach_domain()
708 if (parent->flags & SD_PREFER_SIBLING) in cpu_attach_domain()
709 tmp->flags |= SD_PREFER_SIBLING; in cpu_attach_domain()
712 tmp = tmp->parent; in cpu_attach_domain()
717 sd = sd->parent; in cpu_attach_domain()
720 sd->child = NULL; in cpu_attach_domain()
723 for (tmp = sd; tmp; tmp = tmp->parent) in cpu_attach_domain()
724 numa_distance += !!(tmp->flags & SD_NUMA); in cpu_attach_domain()
729 tmp = rq->sd; in cpu_attach_domain()
730 rcu_assign_pointer(rq->sd, sd); in cpu_attach_domain()
767 * Given a node-distance table, for example:
777 * 0 ----- 1
781 * 3 ----- 2
783 * We want to construct domains and groups to represent this. The way we go
784 * about doing this is to build the domains on 'hops'. For each NUMA level we
789 * NUMA-2 0-3 0-3 0-3 0-3
790 * groups: {0-1,3},{1-3} {0-2},{0,2-3} {1-3},{0-1,3} {0,2-3},{0-2}
792 * NUMA-1 0-1,3 0-2 1-3 0,2-3
795 * NUMA-0 0 1 2 3
800 * represented multiple times -- hence the "overlap" naming for this part of
804 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
808 * - the first group of each domain is its child domain; this
809 * gets us the first 0-1,3
810 * - the only uncovered node is 2, whose child domain is 1-3. in build_overlap_sched_groups()
813 * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both
814 * groups include the CPUs of Node-0, while those CPUs would not in fact ever
815 * end up at those groups (they would end up in group: 0-1,3).
838 * 0 ----- 1
842 * 2 ----- 3
850 * NUMA-2 0-3 0-3
851 * groups: {0-2},{1-3} {1-3},{0-2}
853 * NUMA-1 0-2 0-3 0-3 1-3
855 * NUMA-0 0 1 2 3
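As a rough illustration of the "hops" construction described above, the sketch below starts from the ring example's node-distance table, deduplicates and sorts the distances into levels (in the spirit of the selection sort mentioned later in sched_init_numa()), and prints each node's span per level; for node 0 this reproduces NUMA-1 = {0,1,3} and NUMA-2 = {0-3}. This is a standalone approximation, not the kernel algorithm itself:

/* Standalone sketch: derive NUMA levels and per-node spans from a distance
 * table matching the 0-1-2-3 ring example above. */
#include <stdio.h>
#include <stdbool.h>

#define NR_NODES 4

static const int dist[NR_NODES][NR_NODES] = {
	{ 10, 20, 30, 20 },
	{ 20, 10, 20, 30 },
	{ 30, 20, 10, 20 },
	{ 20, 30, 20, 10 },
};

int main(void)
{
	int levels[NR_NODES * NR_NODES], nr_levels = 0;

	/* Deduplicate the distances ... */
	for (int i = 0; i < NR_NODES; i++) {
		for (int j = 0; j < NR_NODES; j++) {
			int d = dist[i][j];
			bool seen = false;

			for (int k = 0; k < nr_levels; k++)
				seen |= (levels[k] == d);
			if (!seen)
				levels[nr_levels++] = d;
		}
	}
	/* ... and sort them into increasing "hop" levels. */
	for (int a = 0; a < nr_levels; a++)
		for (int b = a + 1; b < nr_levels; b++)
			if (levels[b] < levels[a]) {
				int t = levels[a]; levels[a] = levels[b]; levels[b] = t;
			}

	/* Each node's span at a level is every node within that distance. */
	for (int n = 0; n < NR_NODES; n++) {
		for (int l = 0; l < nr_levels; l++) {
			printf("node %d NUMA-%d span:", n, l);
			for (int m = 0; m < NR_NODES; m++)
				if (dist[n][m] <= levels[l])
					printf(" %d", m);
			printf("\n");
		}
	}
	return 0;
}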
873 struct sd_data *sdd = sd->private; in build_balance_mask()
880 sibling = *per_cpu_ptr(sdd->sd, i); in build_balance_mask()
887 if (!sibling->child) in build_balance_mask()
891 if (!cpumask_equal(sg_span, sched_domain_span(sibling->child))) in build_balance_mask()
902 * XXX: This creates per-node group entries; since the load-balancer will
903 * immediately access remote memory to construct this group's load-balance
919 if (sd->child) in build_group_from_child_sched_domain()
920 cpumask_copy(sg_span, sched_domain_span(sd->child)); in build_group_from_child_sched_domain()
924 atomic_inc(&sg->ref); in build_group_from_child_sched_domain()
932 struct sd_data *sdd = sd->private; in init_overlap_sched_group()
939 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in init_overlap_sched_group()
940 if (atomic_inc_return(&sg->sgc->ref) == 1) in init_overlap_sched_group()
946 * Initialize sgc->capacity such that even if we mess up the in init_overlap_sched_group()
947 * domains and no possible iteration will get us here, we won't in init_overlap_sched_group()
951 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); in init_overlap_sched_group()
952 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
953 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
963 while (sibling->child && in find_descended_sibling()
964 !cpumask_subset(sched_domain_span(sibling->child), in find_descended_sibling()
966 sibling = sibling->child; in find_descended_sibling()
973 while (sibling->child && in find_descended_sibling()
974 cpumask_equal(sched_domain_span(sibling->child), in find_descended_sibling()
976 sibling = sibling->child; in find_descended_sibling()
987 struct sd_data *sdd = sd->private; in build_overlap_sched_groups()
999 sibling = *per_cpu_ptr(sdd->sd, i); in build_overlap_sched_groups()
1003 * domain tree is of unequal depth, make sure to skip domains in build_overlap_sched_groups()
1008 * Domains should always include the CPU they're built on, so in build_overlap_sched_groups()
1029 * 0 --- 1 --- 2 --- 3 in build_overlap_sched_groups()
1031 * NUMA-3 0-3 N/A N/A 0-3 in build_overlap_sched_groups()
1032 * groups: {0-2},{1-3} {1-3},{0-2} in build_overlap_sched_groups()
1034 * NUMA-2 0-2 0-3 0-3 1-3 in build_overlap_sched_groups()
1035 * groups: {0-1},{1-3} {0-2},{2-3} {1-3},{0-1} {2-3},{0-2} in build_overlap_sched_groups()
1037 * NUMA-1 0-1 0-2 1-3 2-3 in build_overlap_sched_groups()
1040 * NUMA-0 0 1 2 3 in build_overlap_sched_groups()
1042 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the in build_overlap_sched_groups()
1045 if (sibling->child && in build_overlap_sched_groups()
1046 !cpumask_subset(sched_domain_span(sibling->child), span)) in build_overlap_sched_groups()
1061 last->next = sg; in build_overlap_sched_groups()
1063 last->next = first; in build_overlap_sched_groups()
1065 sd->groups = first; in build_overlap_sched_groups()
1072 return -ENOMEM; in build_overlap_sched_groups()
1077 * Package topology (also see the load-balance blurb in fair.c)
1082 * - Simultaneous multithreading (SMT)
1083 * - Multi-Core Cache (MC)
1084 * - Package (DIE)
1090 * sched_domain -> sched_group -> sched_group_capacity
1092 * `-' `-'
1094 * The sched_domains are per-CPU and have a two way link (parent & child) and
1098 * denoting the domains of the level below (or individual CPUs in case of the
1110 * - or -
1112 * DIE 0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
1113 * MC 0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
1114 * SMT 0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
1127 * - The first is the balance_cpu (see should_we_balance() and the
1128 * load-balance blurb in fair.c); for each group we only want 1 CPU to in build_sched_groups()
1131 * - The second is the sched_group_capacity; we want all identical groups
1149 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in get_group()
1150 struct sched_domain *child = sd->child; in get_group()
1157 sg = *per_cpu_ptr(sdd->sg, cpu); in get_group()
1158 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
1161 already_visited = atomic_inc_return(&sg->ref) > 1; in get_group()
1163 WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)); in get_group()
1177 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); in get_group()
1178 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in get_group()
1179 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in get_group()
1186 * covered by the given span, will set each group's ->cpumask correctly,
1187 * and will initialize their ->sgc.
1195 struct sd_data *sdd = sd->private; in build_sched_groups()
1218 last->next = sg; in build_sched_groups()
1221 last->next = first; in build_sched_groups()
1222 sd->groups = first; in build_sched_groups()
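build_sched_groups() closes the ring here: the last group's ->next points back at the first and sd->groups at the head, which is why the earlier fragments (sched_domain_debug_one(), init_sched_groups_capacity()) iterate with do { ... } while (group != sd->groups). A minimal standalone model of that circular walk, with toy structs in place of the kernel's:

/* Toy model of the circular sched_group list hanging off a domain. */
#include <stdio.h>

struct toy_group {
	int id;
	struct toy_group *next;		/* circular: last group points to first */
};

struct toy_domain {
	struct toy_group *groups;	/* first group; also the loop terminator */
};

int main(void)
{
	struct toy_group g0 = { .id = 0 }, g1 = { .id = 1 };
	struct toy_domain sd = { .groups = &g0 };

	g0.next = &g1;
	g1.next = &g0;			/* close the ring, as last->next = first */

	struct toy_group *group = sd.groups;
	do {
		printf("group %d\n", group->id);
		group = group->next;
	} while (group != sd.groups);

	return 0;
}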
1239 struct sched_group *sg = sd->groups; in init_sched_groups_capacity()
1244 int cpu, max_cpu = -1; in init_sched_groups_capacity()
1246 sg->group_weight = cpumask_weight(sched_group_span(sg)); in init_sched_groups_capacity()
1248 if (!(sd->flags & SD_ASYM_PACKING)) in init_sched_groups_capacity()
1257 sg->asym_prefer_cpu = max_cpu; in init_sched_groups_capacity()
1260 sg = sg->next; in init_sched_groups_capacity()
1261 } while (sg != sd->groups); in init_sched_groups_capacity()
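For SD_ASYM_PACKING, the loop above settles on the group's asym_prefer_cpu, i.e. the CPU with the highest packing priority. A standalone sketch of that selection, using a made-up priority table rather than the kernel's arch-provided ranking:

/* Toy asym_prefer_cpu selection (hypothetical priorities, not kernel code). */
#include <stdio.h>

#define NR_CPUS 4

int main(void)
{
	const int prio[NR_CPUS] = { 10, 30, 20, 30 };	/* hypothetical rankings */
	int max_cpu = -1;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (max_cpu < 0 || prio[cpu] > prio[max_cpu])
			max_cpu = cpu;
	}

	/* The first CPU holding the top priority wins; CPU1 in this example. */
	printf("asym_prefer_cpu = %d\n", max_cpu);
	return 0;
}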
1286 #define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus)
1332 if (capacity == entry->capacity) in asym_cpu_capacity_update_data()
1339 entry->capacity = capacity; in asym_cpu_capacity_update_data()
1340 list_add(&entry->link, &asym_cap_list); in asym_cpu_capacity_update_data()
1346 * Build-up/update list of CPUs grouped by their capacities
1347 * An update requires explicit request to rebuild sched domains
1363 list_del(&entry->link); in asym_cpu_capacity_scan()
1374 list_del(&entry->link); in asym_cpu_capacity_scan()
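The scan above keeps one list entry per distinct CPU capacity, each spanning the CPUs that share that capacity. A standalone sketch of the same grouping, using arrays and hypothetical big.LITTLE-style capacity values instead of the kernel's list and cpumasks:

/* Standalone sketch: group CPUs by capacity, one entry per distinct value. */
#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
	/* Hypothetical asymmetric capacities (4 big + 4 little CPUs). */
	const unsigned long capacity[NR_CPUS] = {
		1024, 1024, 1024, 1024, 512, 512, 512, 512
	};
	unsigned long caps[NR_CPUS];
	int nr_caps = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		int k;

		for (k = 0; k < nr_caps; k++)
			if (caps[k] == capacity[cpu])
				break;
		if (k == nr_caps)
			caps[nr_caps++] = capacity[cpu];
	}

	for (int k = 0; k < nr_caps; k++) {
		printf("capacity %lu: CPUs", caps[k]);
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			if (capacity[cpu] == caps[k])
				printf(" %d", cpu);
		printf("\n");
	}
	return 0;
}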
1380 * Initializers for schedule domains
1381 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
1384 static int default_relax_domain_level = -1;
1401 if (!attr || attr->relax_domain_level < 0) { in set_domain_attribute()
1406 request = attr->relax_domain_level; in set_domain_attribute()
1408 if (sd->level > request) { in set_domain_attribute()
1410 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); in set_domain_attribute()
1422 if (!atomic_read(&d->rd->refcount)) in __free_domain_allocs()
1423 free_rootdomain(&d->rd->rcu); in __free_domain_allocs()
1426 free_percpu(d->sd); in __free_domain_allocs()
1443 d->sd = alloc_percpu(struct sched_domain *); in __visit_domain_allocation_hell()
1444 if (!d->sd) in __visit_domain_allocation_hell()
1446 d->rd = alloc_rootdomain(); in __visit_domain_allocation_hell()
1447 if (!d->rd) in __visit_domain_allocation_hell()
1460 struct sd_data *sdd = sd->private; in claim_allocations()
1462 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); in claim_allocations()
1463 *per_cpu_ptr(sdd->sd, cpu) = NULL; in claim_allocations()
1465 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) in claim_allocations()
1466 *per_cpu_ptr(sdd->sds, cpu) = NULL; in claim_allocations()
1468 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) in claim_allocations()
1469 *per_cpu_ptr(sdd->sg, cpu) = NULL; in claim_allocations()
1471 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) in claim_allocations()
1472 *per_cpu_ptr(sdd->sgc, cpu) = NULL; in claim_allocations()
1496 * SD_SHARE_CPUCAPACITY - describes SMT topologies
1497 * SD_SHARE_PKG_RESOURCES - describes shared caches
1498 * SD_NUMA - describes NUMA topologies
1503 * SD_ASYM_PACKING - describes SMT quirks
1516 struct sd_data *sdd = &tl->data; in sd_init()
1517 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in sd_init()
1525 sched_domains_curr_level = tl->numa_level; in sd_init()
1528 sd_weight = cpumask_weight(tl->mask(cpu)); in sd_init()
1530 if (tl->sd_flags) in sd_init()
1531 sd_flags = (*tl->sd_flags)(); in sd_init()
1563 .name = tl->name, in sd_init()
1568 cpumask_and(sd_span, cpu_map, tl->mask(cpu)); in sd_init()
1571 sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map); in sd_init()
1573 WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) == in sd_init()
1581 if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child) in sd_init()
1582 sd->child->flags &= ~SD_PREFER_SIBLING; in sd_init()
1584 if (sd->flags & SD_SHARE_CPUCAPACITY) { in sd_init()
1585 sd->imbalance_pct = 110; in sd_init()
1587 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { in sd_init()
1588 sd->imbalance_pct = 117; in sd_init()
1589 sd->cache_nice_tries = 1; in sd_init()
1592 } else if (sd->flags & SD_NUMA) { in sd_init()
1593 sd->cache_nice_tries = 2; in sd_init()
1595 sd->flags &= ~SD_PREFER_SIBLING; in sd_init()
1596 sd->flags |= SD_SERIALIZE; in sd_init()
1597 if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) { in sd_init()
1598 sd->flags &= ~(SD_BALANCE_EXEC | in sd_init()
1605 sd->cache_nice_tries = 1; in sd_init()
1612 if (sd->flags & SD_SHARE_PKG_RESOURCES) { in sd_init()
1613 sd->shared = *per_cpu_ptr(sdd->sds, sd_id); in sd_init()
1614 atomic_inc(&sd->shared->ref); in sd_init()
1615 atomic_set(&sd->shared->nr_busy_cpus, sd_weight); in sd_init()
1618 sd->private = sdd; in sd_init()
1624 * Topology list, bottom-up.
1641 for (tl = sched_domain_topology; tl->mask; tl++)
1707 * - If the maximum distance between any nodes is 1 hop, the system
1709 * - If for two nodes A and B, located N > 1 hops away from each other,
1757 * O(nr_nodes^2) deduplicating selection sort -- in order to find the in sched_init_numa()
1849 sched_numa_warn("Node-distance not symmetric"); in sched_init_numa()
1898 sched_max_numa_distance = sched_domains_numa_distance[nr_levels - 1]; in sched_init_numa()
1917 * Thus, when a CPU of a never-onlined-before node gets plugged in, in __sched_domains_numa_masks_set()
1980 * sched_numa_find_closest() - given the NUMA topology, find the cpu
2007 struct sd_data *sdd = &tl->data; in __sdt_alloc()
2009 sdd->sd = alloc_percpu(struct sched_domain *); in __sdt_alloc()
2010 if (!sdd->sd) in __sdt_alloc()
2011 return -ENOMEM; in __sdt_alloc()
2013 sdd->sds = alloc_percpu(struct sched_domain_shared *); in __sdt_alloc()
2014 if (!sdd->sds) in __sdt_alloc()
2015 return -ENOMEM; in __sdt_alloc()
2017 sdd->sg = alloc_percpu(struct sched_group *); in __sdt_alloc()
2018 if (!sdd->sg) in __sdt_alloc()
2019 return -ENOMEM; in __sdt_alloc()
2021 sdd->sgc = alloc_percpu(struct sched_group_capacity *); in __sdt_alloc()
2022 if (!sdd->sgc) in __sdt_alloc()
2023 return -ENOMEM; in __sdt_alloc()
2034 return -ENOMEM; in __sdt_alloc()
2036 *per_cpu_ptr(sdd->sd, j) = sd; in __sdt_alloc()
2041 return -ENOMEM; in __sdt_alloc()
2043 *per_cpu_ptr(sdd->sds, j) = sds; in __sdt_alloc()
2048 return -ENOMEM; in __sdt_alloc()
2050 sg->next = sg; in __sdt_alloc()
2052 *per_cpu_ptr(sdd->sg, j) = sg; in __sdt_alloc()
2057 return -ENOMEM; in __sdt_alloc()
2060 sgc->id = j; in __sdt_alloc()
2063 *per_cpu_ptr(sdd->sgc, j) = sgc; in __sdt_alloc()
2076 struct sd_data *sdd = &tl->data; in __sdt_free()
2081 if (sdd->sd) { in __sdt_free()
2082 sd = *per_cpu_ptr(sdd->sd, j); in __sdt_free()
2083 if (sd && (sd->flags & SD_OVERLAP)) in __sdt_free()
2084 free_sched_groups(sd->groups, 0); in __sdt_free()
2085 kfree(*per_cpu_ptr(sdd->sd, j)); in __sdt_free()
2088 if (sdd->sds) in __sdt_free()
2089 kfree(*per_cpu_ptr(sdd->sds, j)); in __sdt_free()
2090 if (sdd->sg) in __sdt_free()
2091 kfree(*per_cpu_ptr(sdd->sg, j)); in __sdt_free()
2092 if (sdd->sgc) in __sdt_free()
2093 kfree(*per_cpu_ptr(sdd->sgc, j)); in __sdt_free()
2095 free_percpu(sdd->sd); in __sdt_free()
2096 sdd->sd = NULL; in __sdt_free()
2097 free_percpu(sdd->sds); in __sdt_free()
2098 sdd->sds = NULL; in __sdt_free()
2099 free_percpu(sdd->sg); in __sdt_free()
2100 sdd->sg = NULL; in __sdt_free()
2101 free_percpu(sdd->sgc); in __sdt_free()
2102 sdd->sgc = NULL; in __sdt_free()
2113 sd->level = child->level + 1; in build_sched_domain()
2114 sched_domain_level_max = max(sched_domain_level_max, sd->level); in build_sched_domain()
2115 child->parent = sd; in build_sched_domain()
2122 child->name, sd->name); in build_sched_domain()
2138 * any two given CPUs at this (non-NUMA) topology level.
2146 if (tl->flags & SDTL_OVERLAP) in topology_span_sane()
2150 * Non-NUMA levels cannot partially overlap - they must be either in topology_span_sane()
2152 * breaking the sched_group lists - i.e. a later get_group() pass in topology_span_sane()
2164 if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) && in topology_span_sane()
2165 cpumask_intersects(tl->mask(cpu), tl->mask(i))) in topology_span_sane()
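The check above enforces "equal or disjoint": at a non-NUMA level, two CPUs' topology masks may never partially overlap. A standalone sketch of the same predicate with plain bitmasks instead of the cpumask API:

/* "Equal or disjoint" check in the spirit of topology_span_sane(). */
#include <stdio.h>
#include <stdbool.h>

static bool spans_sane(unsigned int a, unsigned int b)
{
	bool equal      = (a == b);
	bool intersects = (a & b) != 0;

	/* A partial overlap (intersecting but not equal) is the broken case. */
	return equal || !intersects;
}

int main(void)
{
	printf("%d\n", spans_sane(0x0f, 0x0f));	/* equal    -> sane  (1) */
	printf("%d\n", spans_sane(0x0f, 0xf0));	/* disjoint -> sane  (1) */
	printf("%d\n", spans_sane(0x0f, 0x3c));	/* partial  -> bogus (0) */
	return 0;
}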
2173 * Build sched domains for a given set of CPUs and attach the sched domains
2183 int i, ret = -ENOMEM; in build_sched_domains()
2193 /* Set up domains for CPUs specified by the cpu_map: */ in build_sched_domains()
2205 has_asym |= sd->flags & SD_ASYM_CPUCAPACITY; in build_sched_domains()
2209 if (tl->flags & SDTL_OVERLAP) in build_sched_domains()
2210 sd->flags |= SD_OVERLAP; in build_sched_domains()
2216 /* Build the groups for the domains */ in build_sched_domains()
2218 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
2219 sd->span_weight = cpumask_weight(sched_domain_span(sd)); in build_sched_domains()
2220 if (sd->flags & SD_OVERLAP) { in build_sched_domains()
2231 for (i = nr_cpumask_bits-1; i >= 0; i--) { in build_sched_domains()
2235 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
2241 /* Attach the domains */ in build_sched_domains()
2248 if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity)) in build_sched_domains()
2249 WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig); in build_sched_domains()
2260 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); in build_sched_domains()
2270 /* Current sched domains: */
2273 /* Number of sched domains in 'doms_cur': */
2276 /* Attributes of custom domains in 'doms_cur' */
2322 * Set up scheduler domains and groups. For now this just excludes isolated
2346 * Detach sched domains from a group of CPUs specified in cpu_map
2381 * Partition sched domains as specified by the 'ndoms_new'
2390 * current 'doms_cur' domains and in the new 'doms_new', we can leave
2398 * 'fallback_doms', it also forces the domains to be rebuilt.
2401 * ndoms_new == 0 is a special case for destroying existing domains,
2434 /* Destroy deleted domains: */ in partition_sched_domains_locked()
2443 * its dl_bw->total_bw needs to be cleared. It in partition_sched_domains_locked()
2447 rd = cpu_rq(cpumask_any(doms_cur[i]))->rd; in partition_sched_domains_locked()
2452 /* No match - a current sched domain not in new doms_new[] */ in partition_sched_domains_locked()
2466 /* Build new domains: */ in partition_sched_domains_locked()
2473 /* No match - add a new doms_new */ in partition_sched_domains_locked()
2480 /* Build perf. domains: */ in partition_sched_domains_locked()
2484 cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) { in partition_sched_domains_locked()
2489 /* No match - add perf. domains for a new rd */ in partition_sched_domains_locked()
2497 /* Remember the new sched domains: */ in partition_sched_domains_locked()
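Putting the partitioning comments and the destroy/build loops above together: a domain left unchanged between doms_cur and doms_new is neither torn down nor rebuilt, anything only in doms_cur is destroyed, and anything only in doms_new is built. A standalone sketch of that matching, with bitmask "domains" standing in for cpumask pointers and the dattr comparison omitted:

/* Toy sketch of the doms_cur/doms_new matching idea (not kernel code). */
#include <stdio.h>
#include <stdbool.h>

static bool dom_equal(unsigned int a, unsigned int b) { return a == b; }

int main(void)
{
	unsigned int doms_cur[] = { 0x0f, 0xf0 };	/* current partitions   */
	unsigned int doms_new[] = { 0x0f, 0xc0, 0x30 };	/* requested partitions */
	int ndoms_cur = 2, ndoms_new = 3;

	/* Destroy deleted domains: in doms_cur with no match in doms_new. */
	for (int i = 0; i < ndoms_cur; i++) {
		bool match = false;

		for (int j = 0; j < ndoms_new; j++)
			match |= dom_equal(doms_cur[i], doms_new[j]);
		if (!match)
			printf("destroy 0x%02x\n", doms_cur[i]);
	}

	/* Build new domains: in doms_new with no match in doms_cur. */
	for (int j = 0; j < ndoms_new; j++) {
		bool match = false;

		for (int i = 0; i < ndoms_cur; i++)
			match |= dom_equal(doms_new[j], doms_cur[i]);
		if (!match)
			printf("build 0x%02x\n", doms_new[j]);
	}
	return 0;
}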