/Linux-v4.19/block/
kyber-iosched.c
    147  unsigned int sched_domain, u64 target) in kyber_lat_status() argument
    151  if (!cb->stat[sched_domain].nr_samples) in kyber_lat_status()
    154  latency = cb->stat[sched_domain].mean; in kyber_lat_status()
    171  unsigned int sched_domain, int this_status, in kyber_adjust_rw_depth() argument
    185  orig_depth = depth = kqd->domain_tokens[sched_domain].sb.depth; in kyber_adjust_rw_depth()
    215  depth = clamp(depth, 1U, kyber_depth[sched_domain]); in kyber_adjust_rw_depth()
    217  sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth); in kyber_adjust_rw_depth()
    485  unsigned int sched_domain; in rq_clear_domain_token() local
    490  sched_domain = kyber_sched_domain(rq->cmd_flags); in rq_clear_domain_token()
    491  sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr, in rq_clear_domain_token()
    [all …]
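These hits are Kyber's per-domain queue-depth throttling: kyber_lat_status() reads the mean completion latency collected for a scheduling domain, and kyber_adjust_rw_depth() scales that domain's token depth in response, clamps it to [1, kyber_depth[sched_domain]], and resizes the sbitmap token pool. Below is a minimal userspace sketch of the adjust-then-clamp step; the types, the 3/4 back-off policy, and every name in it are invented for illustration, not Kyber's actual code.

#include <stdio.h>

/* Toy stand-ins for Kyber's per-domain token pools; names and the 3/4
 * back-off factor are illustrative assumptions. */
enum { TOY_READ, TOY_WRITE, TOY_NUM_DOMAINS };

static const unsigned int toy_max_depth[TOY_NUM_DOMAINS] = { 256, 128 };
static unsigned int toy_depth[TOY_NUM_DOMAINS] = { 256, 128 };

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

/* If a domain's mean latency exceeds its target, shrink its depth so fewer
 * requests are in flight; otherwise ramp back toward the maximum. The
 * final clamp mirrors line 215 above: depth stays in [1, max]. */
static void toy_adjust_depth(unsigned int domain,
                             unsigned long long mean_lat_ns,
                             unsigned long long target_lat_ns)
{
    unsigned int depth = toy_depth[domain];

    if (mean_lat_ns > target_lat_ns)
        depth = depth * 3 / 4;   /* back off */
    else
        depth += 1;              /* cautious ramp-up */

    toy_depth[domain] = clamp_uint(depth, 1U, toy_max_depth[domain]);
}

int main(void)
{
    toy_adjust_depth(TOY_WRITE, 2000000ULL, 1000000ULL); /* 2 ms > 1 ms target */
    printf("write depth now %u\n", toy_depth[TOY_WRITE]);
    return 0;
}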
/Linux-v4.19/kernel/sched/ |
topology.c
    28   static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, in sched_domain_debug_one()
    115  static void sched_domain_debug(struct sched_domain *sd, int cpu) in sched_domain_debug()
    148  static int sd_degenerate(struct sched_domain *sd) in sd_degenerate()
    174  sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) in sd_parent_degenerate()
    355  static void destroy_sched_domain(struct sched_domain *sd) in destroy_sched_domain()
    371  struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); in destroy_sched_domains_rcu()
    374  struct sched_domain *parent = sd->parent; in destroy_sched_domains_rcu()
    380  static void destroy_sched_domains(struct sched_domain *sd) in destroy_sched_domains()
    395  DEFINE_PER_CPU(struct sched_domain *, sd_llc);
    399  DEFINE_PER_CPU(struct sched_domain *, sd_numa);
    [all …]
sched.h
    839   struct sched_domain *sd;
    1160  static inline struct sched_domain *highest_flag_domain(int cpu, int flag) in highest_flag_domain()
    1162  struct sched_domain *sd, *hsd = NULL; in highest_flag_domain()
    1173  static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) in lowest_flag_domain()
    1175  struct sched_domain *sd; in lowest_flag_domain()
    1185  DECLARE_PER_CPU(struct sched_domain *, sd_llc);
    1189  DECLARE_PER_CPU(struct sched_domain *, sd_numa);
    1190  DECLARE_PER_CPU(struct sched_domain *, sd_asym);
    1595  extern void update_group_capacity(struct sched_domain *sd, int cpu);
    1762  unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) in arch_scale_cpu_capacity()
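highest_flag_domain() and lowest_flag_domain() walk a CPU's domain hierarchy from the bottom up looking for a scheduler flag; topology.c uses the former to compute the per-CPU sd_llc pointer (the highest domain whose CPUs share package resources, i.e. the last-level cache). A self-contained sketch of the same walk over a toy hierarchy follows; the struct layout and the flag value are simplified stand-ins, not the kernel's.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for struct sched_domain: just the parent link and
 * the flags word that the lookup needs. */
struct toy_sd {
    struct toy_sd *parent;
    int flags;
};

#define TOY_SHARE_PKG 0x1 /* illustrative value, not the kernel's */

/* Same shape as sched.h's highest_flag_domain(): walk toward the root and
 * remember the last (highest) domain that still carries the flag; stop at
 * the first level without it. */
static struct toy_sd *toy_highest_flag_domain(struct toy_sd *base, int flag)
{
    struct toy_sd *sd, *hsd = NULL;

    for (sd = base; sd; sd = sd->parent) {
        if (!(sd->flags & flag))
            break;
        hsd = sd;
    }
    return hsd;
}

int main(void)
{
    struct toy_sd numa = { NULL, 0 };              /* no shared cache */
    struct toy_sd mc   = { &numa, TOY_SHARE_PKG }; /* cores sharing an LLC */
    struct toy_sd smt  = { &mc, TOY_SHARE_PKG };   /* hyperthread siblings */

    /* The MC level is the highest domain sharing package resources, so it
     * plays the role of sd_llc here. */
    printf("llc is mc? %d\n",
           toy_highest_flag_domain(&smt, TOY_SHARE_PKG) == &mc);
    return 0;
}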
fair.c
    1753  struct sched_domain *sd; in task_numa_migrate()
    5613  wake_affine_weight(struct sched_domain *sd, struct task_struct *p, in wake_affine_weight()
    5655  static int wake_affine(struct sched_domain *sd, struct task_struct *p, in wake_affine()
    5689  find_idlest_group(struct sched_domain *sd, struct task_struct *p, in find_idlest_group()
    5881  static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, in find_idlest_cpu()
    5898  struct sched_domain *tmp; in find_idlest_cpu()
    5991  static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_core()
    6028  static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_smt()
    6047  static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_core()
    6052  static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_smt()
    [all …]
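These fair.c hits are the wake-up placement path: wake_affine() chooses between the waker's CPU and the task's previous CPU, find_idlest_group()/find_idlest_cpu() descend the domain tree, and select_idle_core()/select_idle_smt() scan the LLC domain's span for idle capacity (the duplicate entries at 6047/6052 are the stub variants for builds without SMT support). Here is a toy sketch of that final scan; idle state is a plain array here, where the kernel consults real cpumasks and runqueues.

#include <stdbool.h>
#include <stdio.h>

#define TOY_NR_CPUS 8

/* Which CPUs are currently idle (invented data for the example). */
static bool toy_idle[TOY_NR_CPUS] = { false, false, true, false,
                                      true, false, false, false };

/* Roughly the shape of the idle-CPU scan: prefer the wake target, else
 * linearly search the domain's span for any idle CPU, else fall back. */
static int toy_select_idle_cpu(const int *span, int span_len, int target)
{
    if (toy_idle[target])
        return target;

    for (int i = 0; i < span_len; i++)
        if (toy_idle[span[i]])
            return span[i];

    return target; /* nothing idle: stay on the target CPU */
}

int main(void)
{
    int llc_span[] = { 0, 1, 2, 3 }; /* CPUs sharing a last-level cache */
    printf("picked cpu %d\n", toy_select_idle_cpu(llc_span, 4, 1));
    return 0;
}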
stats.c
    25  struct sched_domain *sd; in show_schedstat()
debug.c
    261  sd_alloc_ctl_domain_table(struct sched_domain *sd) in sd_alloc_ctl_domain_table()
    289  struct sched_domain *sd; in sd_alloc_ctl_cpu_table()
deadline.c
    1862  struct sched_domain *sd; in find_later_rq()
rt.c
    1644  struct sched_domain *sd; in find_lowest_rq()
core.c
    501   struct sched_domain *sd; in get_nohz_timer_target()
    1612  struct sched_domain *sd; in ttwu_stat()
/Linux-v4.19/include/linux/sched/ |
topology.h
    77   struct sched_domain { struct
    79   struct sched_domain *parent; /* top domain must be null terminated */ argument
    80   struct sched_domain *child; /* bottom domain must be null terminated */ argument
    160  static inline struct cpumask *sched_domain_span(struct sched_domain *sd) in sched_domain_span() argument
    180  struct sched_domain **__percpu sd;
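This is the definition itself: every struct sched_domain links upward to a wider parent and downward to a narrower child, and sched_domain_span() returns the cpumask of CPUs the domain covers (stored as a flexible array at the end of the structure). The sketch below illustrates the nesting invariant the hierarchy maintains; the types are toys, with the span reduced to a 64-bit mask.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy mirror of the parent/child links and per-domain CPU span. */
struct toy_sd {
    struct toy_sd *parent; /* NULL at the top, as in the kernel */
    struct toy_sd *child;  /* NULL at the bottom */
    uint64_t span;         /* bitmask of CPUs this domain covers */
};

/* Invariant the domain tree relies on: each domain's span is contained
 * in its parent's span, so the hierarchy only widens going up. */
static bool toy_spans_nest(const struct toy_sd *base)
{
    for (const struct toy_sd *sd = base; sd && sd->parent; sd = sd->parent)
        if (sd->span & ~sd->parent->span)
            return false;
    return true;
}

int main(void)
{
    struct toy_sd numa = { NULL, NULL, 0xFF };  /* CPUs 0-7 */
    struct toy_sd mc   = { &numa, NULL, 0x0F }; /* CPUs 0-3 */
    struct toy_sd smt  = { &mc, NULL, 0x03 };   /* CPUs 0-1 */

    numa.child = &mc;
    mc.child = &smt;

    printf("spans nest: %s\n", toy_spans_nest(&smt) ? "yes" : "no");
    return 0;
}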
/Linux-v4.19/include/linux/ |
arch_topology.h
    18  struct sched_domain;
    20  unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu) in topology_get_cpu_scale()
/Linux-v4.19/Documentation/scheduler/ |
sched-domains.txt
    1   Each CPU has a "base" scheduling domain (struct sched_domain). The domain
    39  the parent sched_domain (if it exists), and the parent of the parent and so
    61  struct sched_domain fields, SD_FLAG_*, SD_*_INIT to get an idea of
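The passage at line 39 describes the periodic balancing walk: it starts at a CPU's base domain and visits each parent in turn, balancing at a given level only when that level's interval has elapsed, so wider domains balance less often. A toy rendering of that walk follows; the field names are invented, and the real implementation is rebalance_domains() in kernel/sched/fair.c.

#include <stdio.h>

struct toy_sd {
    struct toy_sd *parent;
    const char *name;
    unsigned long interval;     /* ms between balance attempts */
    unsigned long last_balance; /* time of the last attempt, ms */
};

/* Walk from the base domain up through every parent, balancing only the
 * levels whose interval has expired. */
static void toy_rebalance_domains(struct toy_sd *base, unsigned long now)
{
    for (struct toy_sd *sd = base; sd; sd = sd->parent) {
        if (now - sd->last_balance >= sd->interval) {
            printf("balancing %s level\n", sd->name);
            sd->last_balance = now;
        }
    }
}

int main(void)
{
    struct toy_sd numa = { NULL, "NUMA", 64, 0 };
    struct toy_sd mc   = { &numa, "MC", 8, 0 };

    toy_rebalance_domains(&mc, 16); /* MC is due; NUMA is not yet */
    return 0;
}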