Lines Matching +full:3 +full:rd

324  *    3. no SMT is detected.
326  *    5. schedutil is driving the frequency of all CPUs of the rd;
352 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains() local
362 pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n", in build_perf_domains()
370 pr_warn("rd %*pbl: Disabling EAS, SMT is not supported\n", in build_perf_domains()
387 if (rd->pd) in build_perf_domains()
388 pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n", in build_perf_domains()
410 WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n", in build_perf_domains()
418 tmp = rd->pd; in build_perf_domains()
419 rcu_assign_pointer(rd->pd, pd); in build_perf_domains()
427 tmp = rd->pd; in build_perf_domains()
428 rcu_assign_pointer(rd->pd, NULL); in build_perf_domains()
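
The build_perf_domains() lines above follow the classic RCU publish pattern: stash the old rd->pd list, install the new one (or NULL) with rcu_assign_pointer(), and free the old list only after a grace period. Below is a minimal userspace sketch of that pattern, assuming C11 atomics in place of the RCU primitives; struct pd_stub, rd_pd and publish_pd() are invented names, and the immediate free() stands in for the kernel's deferred RCU free:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-in for the perf-domain list head hanging off rd->pd. */
struct pd_stub {
        int nr_perf_states;
        struct pd_stub *next;
};

/* rd->pd analogue: the writer publishes new lists with release semantics. */
static _Atomic(struct pd_stub *) rd_pd;

static void publish_pd(struct pd_stub *new_pd)
{
        /* As in build_perf_domains(): save the old list, publish the new. */
        struct pd_stub *old = atomic_load_explicit(&rd_pd, memory_order_relaxed);

        atomic_store_explicit(&rd_pd, new_pd, memory_order_release);

        /*
         * The kernel frees the old list only after an RCU grace period;
         * this sketch has no concurrent readers, so it frees right away.
         */
        free(old);
}

int main(void)
{
        struct pd_stub *pd = calloc(1, sizeof(*pd));

        if (!pd)
                return 1;
        pd->nr_perf_states = 3;
        publish_pd(pd);                 /* like rcu_assign_pointer(rd->pd, pd)   */

        struct pd_stub *cur = atomic_load_explicit(&rd_pd, memory_order_acquire);

        printf("pd published with %d perf states\n", cur->nr_perf_states);
        publish_pd(NULL);               /* like rcu_assign_pointer(rd->pd, NULL) */
        return 0;
}
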
440 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); in free_rootdomain() local
442 cpupri_cleanup(&rd->cpupri); in free_rootdomain()
443 cpudl_cleanup(&rd->cpudl); in free_rootdomain()
444 free_cpumask_var(rd->dlo_mask); in free_rootdomain()
445 free_cpumask_var(rd->rto_mask); in free_rootdomain()
446 free_cpumask_var(rd->online); in free_rootdomain()
447 free_cpumask_var(rd->span); in free_rootdomain()
448 free_pd(rd->pd); in free_rootdomain()
449 kfree(rd); in free_rootdomain()
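
free_rootdomain() receives only the embedded rcu_head and recovers the enclosing root_domain with container_of() before tearing the members down. A self-contained userspace demonstration of that recovery step, with a deliberately cut-down root_domain_stub whose field set is invented, not the kernel's:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace copy of the kernel macro: map a member pointer to its struct. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_stub {
        void (*func)(struct rcu_head_stub *head);
};

/* Cut-down, invented root_domain: only what this demonstration needs. */
struct root_domain_stub {
        int refcount;
        struct rcu_head_stub rcu;       /* embedded, like rd->rcu */
};

/* free_rootdomain() analogue: the RCU callback only receives the rcu_head. */
static void free_rootdomain_stub(struct rcu_head_stub *rcu)
{
        struct root_domain_stub *rd =
                container_of(rcu, struct root_domain_stub, rcu);

        /* The real function tears down cpupri, cpudl and the masks here. */
        printf("recovered rd at %p, refcount %d\n", (void *)rd, rd->refcount);
        free(rd);
}

int main(void)
{
        struct root_domain_stub *rd = calloc(1, sizeof(*rd));

        if (!rd)
                return 1;
        /* call_rcu() would invoke this after a grace period; call directly. */
        free_rootdomain_stub(&rd->rcu);
        return 0;
}
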
452 void rq_attach_root(struct rq *rq, struct root_domain *rd) in rq_attach_root() argument
459 if (rq->rd) { in rq_attach_root()
460 old_rd = rq->rd; in rq_attach_root()
476 atomic_inc(&rd->refcount); in rq_attach_root()
477 rq->rd = rd; in rq_attach_root()
479 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
489 void sched_get_rd(struct root_domain *rd) in sched_get_rd() argument
491 atomic_inc(&rd->refcount); in sched_get_rd()
494 void sched_put_rd(struct root_domain *rd) in sched_put_rd() argument
496 if (!atomic_dec_and_test(&rd->refcount)) in sched_put_rd()
499 call_rcu(&rd->rcu, free_rootdomain); in sched_put_rd()
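
Taken together, rq_attach_root(), sched_get_rd() and sched_put_rd() above implement a plain reference count whose final put hands the object to call_rcu(). A sketch of the same lifecycle, assuming C11 atomics in place of atomic_inc()/atomic_dec_and_test() and an immediate free() where the kernel defers; all *_stub names are invented:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins: just enough of root_domain and rq for the pattern. */
struct rd_stub {
        atomic_int refcount;
};

struct rq_stub {
        int cpu;
        struct rd_stub *rd;
};

static void get_rd(struct rd_stub *rd)
{
        atomic_fetch_add(&rd->refcount, 1);     /* sched_get_rd() analogue */
}

static void put_rd(struct rd_stub *rd)
{
        /* atomic_dec_and_test(): only the caller that reaches zero frees. */
        if (atomic_fetch_sub(&rd->refcount, 1) != 1)
                return;
        printf("last reference on rd dropped\n");
        free(rd);       /* the kernel defers this via call_rcu() instead */
}

/* rq_attach_root() analogue: swap a runqueue onto a new root domain. */
static void attach_root(struct rq_stub *rq, struct rd_stub *rd)
{
        struct rd_stub *old_rd = rq->rd;

        get_rd(rd);
        rq->rd = rd;
        if (old_rd)
                put_rd(old_rd);
}

int main(void)
{
        struct rd_stub *rd0 = calloc(1, sizeof(*rd0));
        struct rd_stub *rd1 = calloc(1, sizeof(*rd1));
        struct rq_stub rq = { .cpu = 0, .rd = NULL };

        if (!rd0 || !rd1)
                return 1;
        atomic_init(&rd0->refcount, 0);
        atomic_init(&rd1->refcount, 0);

        attach_root(&rq, rd0);  /* rd0: 0 -> 1                     */
        attach_root(&rq, rd1);  /* rd1: 0 -> 1; rd0: 1 -> 0, freed */
        put_rd(rd1);            /* rd1: 1 -> 0, freed              */
        return 0;
}
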
502 static int init_rootdomain(struct root_domain *rd) in init_rootdomain() argument
504 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) in init_rootdomain()
506 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) in init_rootdomain()
508 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) in init_rootdomain()
510 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) in init_rootdomain()
514 rd->rto_cpu = -1; in init_rootdomain()
515 raw_spin_lock_init(&rd->rto_lock); in init_rootdomain()
516 init_irq_work(&rd->rto_push_work, rto_push_irq_work_func); in init_rootdomain()
519 init_dl_bw(&rd->dl_bw); in init_rootdomain()
520 if (cpudl_init(&rd->cpudl) != 0) in init_rootdomain()
523 if (cpupri_init(&rd->cpupri) != 0) in init_rootdomain()
528 cpudl_cleanup(&rd->cpudl); in init_rootdomain()
530 free_cpumask_var(rd->rto_mask); in init_rootdomain()
532 free_cpumask_var(rd->dlo_mask); in init_rootdomain()
534 free_cpumask_var(rd->online); in init_rootdomain()
536 free_cpumask_var(rd->span); in init_rootdomain()
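
init_rootdomain() allocates four cpumasks plus the cpudl/cpupri state and unwinds through a ladder of goto labels, so each failure path frees exactly what was already allocated, in reverse order. A compilable userspace sketch of that unwinding idiom, with calloc() standing in for zalloc_cpumask_var(); struct masks and the label names are illustrative, not the kernel's:

#include <stdlib.h>

struct masks {
        unsigned long *span, *online, *dlo_mask, *rto_mask;
};

/* Returns 0 on success, -1 on failure, leaking nothing either way. */
static int init_masks(struct masks *m, size_t words)
{
        if (!(m->span = calloc(words, sizeof(*m->span))))
                goto out;
        if (!(m->online = calloc(words, sizeof(*m->online))))
                goto free_span;
        if (!(m->dlo_mask = calloc(words, sizeof(*m->dlo_mask))))
                goto free_online;
        if (!(m->rto_mask = calloc(words, sizeof(*m->rto_mask))))
                goto free_dlo;
        return 0;

        /* Unwind in reverse allocation order, as init_rootdomain() does. */
free_dlo:
        free(m->dlo_mask);
free_online:
        free(m->online);
free_span:
        free(m->span);
out:
        return -1;
}

int main(void)
{
        struct masks m;

        if (init_masks(&m, 4))
                return 1;
        free(m.rto_mask);
        free(m.dlo_mask);
        free(m.online);
        free(m.span);
        return 0;
}
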
556 struct root_domain *rd; in alloc_rootdomain() local
558 rd = kzalloc(sizeof(*rd), GFP_KERNEL); in alloc_rootdomain()
559 if (!rd) in alloc_rootdomain()
562 if (init_rootdomain(rd) != 0) { in alloc_rootdomain()
563 kfree(rd); in alloc_rootdomain()
567 return rd; in alloc_rootdomain()
673 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) in cpu_attach_domain() argument
710 rq_attach_root(rq, rd); in cpu_attach_domain()
721 struct root_domain *rd; member
751  * node   0   1   2   3
755  *   3:  20  30  20  10
763  *   3 ----- 2
769  * For the above NUMA topology that gives 3 levels:
771  * NUMA-2  0-3            0-3            0-3            0-3
772  * groups: {0-1,3},{1-3}  {0-2},{0,2-3}  {1-3},{0-1,3}  {0,2-3},{0-2}
774  * NUMA-1  0-1,3          0-2            1-3            0,2-3
775  * groups: {0},{1},{3}    {0},{1},{2}    {1},{2},{3}    {0},{2},{3}
777  * NUMA-0  0              1              2              3
786  * domain. For instance, Node-0's NUMA-2 would only get groups: 0-1,3 and 1-3.
791  * gets us the first 0-1,3
792  * - the only uncovered node is 2, whose child domain is 1-3.
797 * end up at those groups (they would end up in group: 0-1,3).
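
The comment derives the number of NUMA levels from the number of distinct entries in the distance table. Only node 3's row (20 30 20 10) survives in the matches above, so the sketch below completes the matrix from the symmetry of the ring diagram, which is an assumption; it then counts distinct distances the way the scheduler conceptually sizes its NUMA levels (10, 20 and 30 give the 3 levels stated above):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        /*
         * Distance table for the 0-1-2-3 ring above. Row 3 (20 30 20 10)
         * is the one visible in the matches; the other rows are assumed
         * from the symmetry of the diagram.
         */
        static const int dist[4][4] = {
                { 10, 20, 30, 20 },
                { 20, 10, 20, 30 },
                { 30, 20, 10, 20 },
                { 20, 30, 20, 10 },
        };
        int uniq[16], nr = 0;

        for (int i = 0; i < 4; i++) {
                for (int j = 0; j < 4; j++) {
                        bool seen = false;

                        for (int k = 0; k < nr; k++)
                                seen = seen || (uniq[k] == dist[i][j]);
                        if (!seen)
                                uniq[nr++] = dist[i][j];
                }
        }
        /* Distinct distances 10, 20, 30 give the 3 levels stated above. */
        printf("%d NUMA levels\n", nr);
        return 0;
}
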
812  * node   0   1   2   3
816  *   3:  30  20  20  10
824  *   2 ----- 3
826  * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3
832  * NUMA-2  0-3                                          0-3
833  * groups: {0-2},{1-3}                                  {1-3},{0-2}
835  * NUMA-1  0-2            0-3            0-3            1-3
837  * NUMA-0  0              1              2              3
1010 * The tree consists of 3 primary data structures:
1026  * CPU   0    1    2    3    4    5    6    7
1035  * MC    0-3  0-3  0-3  0-3  4-7  4-7  4-7  4-7
1036  * SMT   0-1  0-1  2-3  2-3  4-5  4-5  6-7  6-7
1038  * CPU   0    1    2    3    4    5    6    7
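
The table shows each CPU's stack of domain spans: CPU 0, for example, sits in an SMT domain spanning 0-1 inside an MC domain spanning 0-3. A toy model of one such per-CPU chain, walked via the parent pointer the way the scheduler follows sd->parent; struct sd_stub is a drastic simplification, not the kernel's sched_domain:

#include <stdio.h>

/* Toy, invented stand-in for struct sched_domain: a span plus parent link. */
struct sd_stub {
        const char *name;
        unsigned long span;             /* bit n set => CPU n is in the domain */
        struct sd_stub *parent;
};

int main(void)
{
        /* CPU 0's chain from the table above: SMT spans 0-1, MC spans 0-3. */
        struct sd_stub mc  = { "MC",  0x0f, NULL };
        struct sd_stub smt = { "SMT", 0x03, &mc };

        /* Walk upward the way the scheduler follows sd->parent. */
        for (struct sd_stub *sd = &smt; sd; sd = sd->parent)
                printf("%-3s span %#lx\n", sd->name, sd->span);
        return 0;
}
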
1234 if (!atomic_read(&d->rd->refcount)) in __free_domain_allocs()
1235 free_rootdomain(&d->rd->rcu); in __free_domain_allocs()
1258 d->rd = alloc_rootdomain(); in __visit_domain_allocation_hell()
1259 if (!d->rd) in __visit_domain_allocation_hell()
2059 if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity)) in build_sched_domains()
2060 WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig); in build_sched_domains()
2062 cpu_attach_domain(sd, d.rd, i); in build_sched_domains()
2071 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); in build_sched_domains()
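
READ_ONCE()/WRITE_ONCE() in the build_sched_domains() lines above keep the compiler from tearing or re-reading rd->max_cpu_capacity while other CPUs may look at it. A userspace analogue using C11 relaxed atomics in the same role; the capacity values are made up for illustration:

#include <stdatomic.h>
#include <stdio.h>

/* rd->max_cpu_capacity analogue: written by one CPU, readable by others. */
static _Atomic unsigned long max_cpu_capacity;

static void note_capacity(unsigned long cap)
{
        /* Relaxed atomics play the READ_ONCE()/WRITE_ONCE() role here. */
        if (cap > atomic_load_explicit(&max_cpu_capacity, memory_order_relaxed))
                atomic_store_explicit(&max_cpu_capacity, cap,
                                      memory_order_relaxed);
}

int main(void)
{
        /* Made-up per-CPU capacities, e.g. little and big cores. */
        unsigned long caps[] = { 446, 1024, 871 };

        for (size_t i = 0; i < sizeof(caps) / sizeof(caps[0]); i++)
                note_capacity(caps[i]);
        printf("max_cpu_capacity = %lu\n",
               atomic_load_explicit(&max_cpu_capacity, memory_order_relaxed));
        return 0;
}
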
2250 struct root_domain *rd; in partition_sched_domains_locked() local
2258 rd = cpu_rq(cpumask_any(doms_cur[i]))->rd; in partition_sched_domains_locked()
2259 dl_clear_root_domain(rd); in partition_sched_domains_locked()
2295 cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) { in partition_sched_domains_locked()
2300 /* No match - add perf. domains for a new rd */ in partition_sched_domains_locked()
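
partition_sched_domains_locked() compares each new domain span against the current ones and only rebuilds those that changed; the matches above show the perf-domain side of that check, where a new rd gets perf domains only when no existing set matches. A simplified sketch of that match loop over plain bitmasks, ignoring the sched_domain attribute comparison the real code also performs:

#include <stdbool.h>
#include <stdio.h>

#define NDOMS 2

int main(void)
{
        /* Each entry is one domain span as a CPU bitmask (doms_* analogues). */
        unsigned long doms_cur[NDOMS] = { 0x0f, 0xf0 };
        unsigned long doms_new[NDOMS] = { 0x0f, 0xc0 };

        for (int i = 0; i < NDOMS; i++) {
                bool match = false;

                for (int j = 0; j < NDOMS; j++)
                        match = match || (doms_new[i] == doms_cur[j]);

                /* No match: this span must be (re)built, as in the kernel. */
                printf("doms_new[%d] = %#lx: %s\n", i, doms_new[i],
                       match ? "unchanged, skip rebuild" : "rebuild");
        }
        return 0;
}
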