Searched refs:sched_domain (Results 1 – 13 of 13) sorted by relevance

/Linux-v5.15/block/
kyber-iosched.c
214 unsigned int sched_domain, unsigned int type) in flush_latency_buckets() argument
216 unsigned int *buckets = kqd->latency_buckets[sched_domain][type]; in flush_latency_buckets()
217 atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type]; in flush_latency_buckets()
229 unsigned int sched_domain, unsigned int type, in calculate_percentile() argument
232 unsigned int *buckets = kqd->latency_buckets[sched_domain][type]; in calculate_percentile()
245 if (!kqd->latency_timeout[sched_domain]) in calculate_percentile()
246 kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL); in calculate_percentile()
248 time_is_after_jiffies(kqd->latency_timeout[sched_domain])) { in calculate_percentile()
251 kqd->latency_timeout[sched_domain] = 0; in calculate_percentile()
259 memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type])); in calculate_percentile()
[all …]
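
In kyber-iosched.c, `sched_domain` is Kyber's own notion of an I/O scheduling domain (read/write/discard/other request classes), not the CPU scheduler's struct sched_domain; it indexes the per-domain latency histograms seen at lines 216-217. A minimal sketch of that flush pattern follows; the function name matches the listing, but the kyber type names, KYBER_LATENCY_BUCKETS, and the loop body are assumptions, since only lines 214-217 appear in the excerpt.

    /*
     * Sketch of the flush behind lines 216-217: each CPU accumulates latency
     * samples into atomic buckets, and the flush drains them into the
     * queue-wide histogram. Types and loop reconstructed for illustration.
     */
    static void flush_latency_buckets(struct kyber_queue_data *kqd,
                                      struct kyber_cpu_latency *cpu_latency,
                                      unsigned int sched_domain, unsigned int type)
    {
        unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
        atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
        unsigned int bucket;

        /* Drain each per-CPU counter into the shared histogram and reset it. */
        for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
            buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
    }
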
/Linux-v5.15/kernel/sched/
topology.c
34 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, in sched_domain_debug_one()
130 static void sched_domain_debug(struct sched_domain *sd, int cpu) in sched_domain_debug()
170 static int sd_degenerate(struct sched_domain *sd) in sd_degenerate()
188 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) in sd_parent_degenerate()
604 static void destroy_sched_domain(struct sched_domain *sd) in destroy_sched_domain()
620 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); in destroy_sched_domains_rcu()
623 struct sched_domain *parent = sd->parent; in destroy_sched_domains_rcu()
629 static void destroy_sched_domains(struct sched_domain *sd) in destroy_sched_domains()
644 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
648 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
[all …]
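
The topology.c matches cover building, validating (sched_domain_debug, sd_degenerate), and tearing down the hierarchy. Lines 620-629 show the teardown side: a whole chain of domains is freed from an RCU callback by following sd->parent. A sketch of that walk, assuming destroy_sched_domain() (line 604) frees a single level:

    /*
     * Sketch of the RCU teardown walk shown at lines 620-629.
     * destroy_sched_domain() is assumed to free one level; the callback then
     * advances to the parent so readers never observe a half-freed chain.
     */
    static void destroy_sched_domains_rcu(struct rcu_head *rcu)
    {
        struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

        while (sd) {
            struct sched_domain *parent = sd->parent;

            destroy_sched_domain(sd);
            sd = parent;
        }
    }

    static void destroy_sched_domains(struct sched_domain *sd)
    {
        /* Defer freeing until existing RCU readers of the chain are done. */
        if (sd)
            call_rcu(&sd->rcu, destroy_sched_domains_rcu);
    }
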
sched.h
1002 struct sched_domain __rcu *sd;
1752 static inline struct sched_domain *highest_flag_domain(int cpu, int flag) in highest_flag_domain()
1754 struct sched_domain *sd, *hsd = NULL; in highest_flag_domain()
1765 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) in lowest_flag_domain()
1767 struct sched_domain *sd; in lowest_flag_domain()
1777 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
1781 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
1782 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
1783 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
2251 extern void update_group_capacity(struct sched_domain *sd, int cpu);
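
sched.h declares per-CPU cached pointers into the hierarchy (sd_llc, sd_numa, sd_asym_packing, sd_asym_cpucapacity) along with the highest_flag_domain()/lowest_flag_domain() helpers that derive them. A sketch of reading the cached last-level-cache domain; llc_span_weight() is an illustrative name, and scheduler-internal context (kernel/sched/sched.h) plus the usual RCU protection of sched_domain pointers are assumed.

    /*
     * Sketch: how many CPUs share this CPU's last-level cache, via the
     * per-CPU sd_llc pointer (line 1777). sched_domain pointers are RCU
     * protected, so the dereference must sit under rcu_read_lock().
     */
    static unsigned int llc_span_weight(int cpu)
    {
        struct sched_domain *sd;
        unsigned int weight = 1;

        rcu_read_lock();
        sd = rcu_dereference(per_cpu(sd_llc, cpu));
        if (sd)
            weight = cpumask_weight(sched_domain_span(sd));
        rcu_read_unlock();

        return weight;
    }
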
fair.c
1979 struct sched_domain *sd; in task_numa_migrate()
5947 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, in wake_affine_weight()
5989 static int wake_affine(struct sched_domain *sd, struct task_struct *p, in wake_affine()
6010 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
6072 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, in find_idlest_cpu()
6089 struct sched_domain *tmp; in find_idlest_cpu()
6226 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_smt()
6257 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_smt()
6269 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int … in select_idle_cpu()
6275 struct sched_domain *this_sd; in select_idle_cpu()
[all …]
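
The fair.c matches are mostly wakeup placement: wake_affine() chooses between the waker's and the previous CPU, find_idlest_cpu() descends the hierarchy, and select_idle_cpu() (line 6269) scans the LLC domain for an idle CPU. A heavily simplified sketch of that scan; scan_domain_for_idle_cpu() is an illustrative name, and the real function's scan-cost limiting and idle-SMT-core handling are omitted.

    /*
     * Simplified sketch of the select_idle_cpu() scan pattern: walk the CPUs
     * spanned by the domain, skip CPUs the task may not run on, and take the
     * first idle one.
     */
    static int scan_domain_for_idle_cpu(struct task_struct *p,
                                        struct sched_domain *sd, int target)
    {
        int cpu;

        for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
            if (!cpumask_test_cpu(cpu, p->cpus_ptr))
                continue;
            if (available_idle_cpu(cpu))
                return cpu;
        }

        return -1;
    }
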
stats.c
25 struct sched_domain *sd; in show_schedstat()
debug.c
375 static void register_sd(struct sched_domain *sd, struct dentry *parent) in register_sd()
414 struct sched_domain *sd; in update_sched_domain_debugfs()
core.c
1009 struct sched_domain *sd; in get_nohz_timer_target()
3494 struct sched_domain *sd; in ttwu_stat()
5942 static bool steal_cookie_task(int cpu, struct sched_domain *sd) in steal_cookie_task()
5962 struct sched_domain *sd; in sched_core_balance()
deadline.c
1966 struct sched_domain *sd; in find_later_rq()
rt.c
1703 struct sched_domain *sd; in find_lowest_rq()
/Linux-v5.15/include/linux/sched/
topology.h
79 struct sched_domain { struct
81 struct sched_domain __rcu *parent; /* top domain must be null terminated */ argument
82 struct sched_domain __rcu *child; /* bottom domain must be null terminated */ argument
156 static inline struct cpumask *sched_domain_span(struct sched_domain *sd) in sched_domain_span() argument
180 struct sched_domain *__percpu *sd;
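
include/linux/sched/topology.h holds the definition itself: each struct sched_domain carries RCU-protected parent/child links (lines 81-82) and a trailing bitmap of the CPUs it spans, which sched_domain_span() (line 156) exposes as a cpumask. A sketch of that accessor, assuming the usual kernel idiom of trailing cpumask storage:

    /*
     * Sketch of the accessor at line 156: the spanned CPUs live in a flexible
     * bitmap at the tail of struct sched_domain, and to_cpumask() reinterprets
     * that storage as a struct cpumask.
     */
    static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
    {
        return to_cpumask(sd->span);
    }
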
/Linux-v5.15/Documentation/scheduler/
sched-domains.rst
5 Each CPU has a "base" scheduling domain (struct sched_domain). The domain
45 the parent sched_domain (if it exists), and the parent of the parent and so
70 of a sched_domain.
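
The documentation lines describe the hierarchy the code above implements: every CPU has a base domain, each parent spans a superset of its child, and load balancing walks upward from the base. A sketch of that walk using the scheduler-internal for_each_domain() helper, which follows rq->sd and then ->parent; print_domain_spans() is an illustrative name, and kernel/sched/sched.h context plus rcu_read_lock() protection are assumed.

    /*
     * Sketch: walk one CPU's hierarchy from base domain to topmost parent,
     * printing the CPUs each level spans.
     */
    static void print_domain_spans(int cpu)
    {
        struct sched_domain *sd;

        rcu_read_lock();
        for_each_domain(cpu, sd)
            pr_info("cpu%d: level %d spans %*pbl\n", cpu, sd->level,
                    cpumask_pr_args(sched_domain_span(sd)));
        rcu_read_unlock();
    }
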
sched-capacity.rst
287 - The SD_ASYM_CPUCAPACITY_FULL flag will be set at the lowest sched_domain
289 - The SD_ASYM_CPUCAPACITY flag will be set for any sched_domain that spans
318 sched_asym_cpucapacity static key will be enabled. However, the sched_domain
327 the sched_domain hierarchy (if relevant, i.e. the codepath targets a specific
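
These excerpts place the asymmetric-capacity flags in the hierarchy: SD_ASYM_CPUCAPACITY_FULL at the lowest level spanning every capacity value, SD_ASYM_CPUCAPACITY on any level spanning mixed capacities. A sketch of testing for asymmetry from one CPU's point of view, using lowest_flag_domain() from the sched.h results above; cpu_sees_asym_capacity() is an illustrative name and scheduler-internal context is assumed.

    /*
     * Sketch: does this CPU's hierarchy contain a level spanning CPUs of
     * different capacities? lowest_flag_domain() (sched.h line 1765) returns
     * the lowest domain carrying the flag, or NULL if no level has it.
     */
    static bool cpu_sees_asym_capacity(int cpu)
    {
        bool asym;

        rcu_read_lock();
        asym = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY) != NULL;
        rcu_read_unlock();

        return asym;
    }
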
sched-energy.rst
335 flag to be set in the sched_domain hierarchy.