Lines matching refs: sg_cpu (all hits fall in the schedutil cpufreq governor, kernel/sched/cpufreq_schedutil.c)
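Every sg_cpu field touched in the matches below can be read off the listing itself; the sketch collects them into one declaration for orientation. Field types and ordering are inferred from how the fields are used, and the real kernel definition has more members and #ifdef guards, so treat this as an assumption-laden summary rather than the authoritative struct.

struct sugov_cpu {
        struct update_util_data update_util;    /* hook installed by sugov_start() */
        struct sugov_policy *sg_policy;         /* owning policy-wide state */
        unsigned int cpu;

        /* iowait boost state */
        bool iowait_boost_pending;
        unsigned int iowait_boost;
        u64 last_update;

        /* snapshot refreshed by sugov_get_util() */
        unsigned long util;
        unsigned long bw_dl;
        unsigned long max;

        /* NOHZ busy detection (CONFIG_NO_HZ_COMMON only) */
        unsigned long saved_idle_calls;
};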

164 static void sugov_get_util(struct sugov_cpu *sg_cpu) in sugov_get_util() argument
166 struct rq *rq = cpu_rq(sg_cpu->cpu); in sugov_get_util()
167 unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu); in sugov_get_util()
169 sg_cpu->max = max; in sugov_get_util()
170 sg_cpu->bw_dl = cpu_bw_dl(rq); in sugov_get_util()
171 sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max, in sugov_get_util()
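Pieced together, the matched lines form essentially the whole body of sugov_get_util(): it snapshots the CPU's capacity, deadline bandwidth and effective utilization into sg_cpu. A sketch of how they likely fit together follows; the trailing arguments of the effective_cpu_util() call are not in the match list and are reconstructed, so treat them as an assumption.

static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
        struct rq *rq = cpu_rq(sg_cpu->cpu);
        unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

        sg_cpu->max = max;
        sg_cpu->bw_dl = cpu_bw_dl(rq);
        sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
                                          FREQUENCY_UTIL, NULL); /* tail args assumed */
}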
186 static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time, in sugov_iowait_reset() argument
189 s64 delta_ns = time - sg_cpu->last_update; in sugov_iowait_reset()
195 sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0; in sugov_iowait_reset()
196 sg_cpu->iowait_boost_pending = set_iowait_boost; in sugov_iowait_reset()
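The two matched assignments are the tail of sugov_iowait_reset(): the boost is cleared, or re-armed at IOWAIT_BOOST_MIN if the reset was triggered by a fresh I/O wakeup. The guard in front of them (reset only once at least a tick has passed since last_update) is not in the match list; the sketch below reconstructs it and should be read as an assumption about this kernel version.

static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
                               bool set_iowait_boost)
{
        s64 delta_ns = time - sg_cpu->last_update;

        /* Reconstructed guard: only reset once a full tick has elapsed */
        if (delta_ns <= TICK_NSEC)
                return false;

        sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;

        return true;
}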
215 static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, in sugov_iowait_boost() argument
221 if (sg_cpu->iowait_boost && in sugov_iowait_boost()
222 sugov_iowait_reset(sg_cpu, time, set_iowait_boost)) in sugov_iowait_boost()
230 if (sg_cpu->iowait_boost_pending) in sugov_iowait_boost()
232 sg_cpu->iowait_boost_pending = true; in sugov_iowait_boost()
235 if (sg_cpu->iowait_boost) { in sugov_iowait_boost()
236 sg_cpu->iowait_boost = in sugov_iowait_boost()
237 min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE); in sugov_iowait_boost()
242 sg_cpu->iowait_boost = IOWAIT_BOOST_MIN; in sugov_iowait_boost()
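Read together, these lines are the build-up half of the iowait boost: a wakeup after I/O either starts the boost at IOWAIT_BOOST_MIN or doubles it, capped at SCHED_CAPACITY_SCALE. A sketch of the surrounding function follows; the early-return guards that the match list omits are reconstructed and therefore assumptions.

static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
                               unsigned int flags)
{
        bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

        /* Reset the boost if the CPU has been idle for long enough */
        if (sg_cpu->iowait_boost &&
            sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
                return;

        /* Reconstructed guards: boost only I/O wakeups, and only once per request */
        if (!set_iowait_boost)
                return;
        if (sg_cpu->iowait_boost_pending)
                return;
        sg_cpu->iowait_boost_pending = true;

        /* Double the boost at each request, capped at full capacity */
        if (sg_cpu->iowait_boost) {
                sg_cpu->iowait_boost =
                        min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
                return;
        }

        /* First wakeup after I/O: start from the minimum boost */
        sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}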
262 static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time) in sugov_iowait_apply() argument
267 if (!sg_cpu->iowait_boost) in sugov_iowait_apply()
271 if (sugov_iowait_reset(sg_cpu, time, false)) in sugov_iowait_apply()
274 if (!sg_cpu->iowait_boost_pending) { in sugov_iowait_apply()
278 sg_cpu->iowait_boost >>= 1; in sugov_iowait_apply()
279 if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) { in sugov_iowait_apply()
280 sg_cpu->iowait_boost = 0; in sugov_iowait_apply()
285 sg_cpu->iowait_boost_pending = false; in sugov_iowait_apply()
291 boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT; in sugov_iowait_apply()
292 if (sg_cpu->util < boost) in sugov_iowait_apply()
293 sg_cpu->util = boost; in sugov_iowait_apply()
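The decay half mirrors the build-up: with no pending I/O wakeup the boost is halved on each update and dropped once it falls below IOWAIT_BOOST_MIN, and whatever survives is scaled by the CPU's capacity (sg_cpu->max >> SCHED_CAPACITY_SHIFT) and used as a floor for sg_cpu->util. The self-contained user-space C sketch below replays that arithmetic with assumed values for the kernel constants (SCHED_CAPACITY_SHIFT = 10, IOWAIT_BOOST_MIN = SCHED_CAPACITY_SCALE / 8); it illustrates the mechanism and is not kernel code.

#include <stdio.h>

/* Assumed values of the kernel constants, for illustration only */
#define SCHED_CAPACITY_SHIFT    10
#define SCHED_CAPACITY_SCALE    (1UL << SCHED_CAPACITY_SHIFT)
#define IOWAIT_BOOST_MIN        (SCHED_CAPACITY_SCALE / 8)

int main(void)
{
        unsigned long max = 760;        /* hypothetical CPU capacity */
        unsigned long util = 300;       /* hypothetical sg_cpu->util */
        unsigned long boost = IOWAIT_BOOST_MIN;
        int i;

        /* Build-up: each I/O wakeup doubles the boost, capped at full scale */
        for (i = 0; i < 5; i++) {
                printf("wakeup %d: iowait_boost=%lu\n", i, boost);
                boost <<= 1;
                if (boost > SCHED_CAPACITY_SCALE)
                        boost = SCHED_CAPACITY_SCALE;
        }

        /* Decay + apply: halve per update, scale by capacity, floor the util */
        while (boost) {
                unsigned long scaled = (boost * max) >> SCHED_CAPACITY_SHIFT;
                unsigned long effective = util < scaled ? scaled : util;

                printf("boost=%4lu scaled=%4lu effective util=%4lu\n",
                       boost, scaled, effective);

                boost >>= 1;
                if (boost < IOWAIT_BOOST_MIN)
                        boost = 0;
        }

        return 0;
}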
297 static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) in sugov_cpu_is_busy() argument
299 unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu); in sugov_cpu_is_busy()
300 bool ret = idle_calls == sg_cpu->saved_idle_calls; in sugov_cpu_is_busy()
302 sg_cpu->saved_idle_calls = idle_calls; in sugov_cpu_is_busy()
306 static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } in sugov_cpu_is_busy() argument
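The two definitions above are the CONFIG_NO_HZ_COMMON and non-NOHZ variants of the same helper: the first reports the CPU as busy when the NOHZ idle-call counter has not moved since the previous update, the second is a stub that always reports idle. Only the #ifdef scaffolding and the return statement are added in the sketch below.

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
        unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */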
313 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu) in ignore_dl_rate_limit() argument
315 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) in ignore_dl_rate_limit()
316 sg_cpu->sg_policy->limits_changed = true; in ignore_dl_rate_limit()
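These two lines are effectively the whole helper: when the deadline bandwidth of the CPU's runqueue has grown since sg_cpu->bw_dl was last sampled, the normal rate limit is bypassed by flagging limits_changed on the policy. Only the braces are added in the sketch:

static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
        if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
                sg_cpu->sg_policy->limits_changed = true;
}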
319 static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu, in sugov_update_single_common() argument
322 sugov_iowait_boost(sg_cpu, time, flags); in sugov_update_single_common()
323 sg_cpu->last_update = time; in sugov_update_single_common()
325 ignore_dl_rate_limit(sg_cpu); in sugov_update_single_common()
327 if (!sugov_should_update_freq(sg_cpu->sg_policy, time)) in sugov_update_single_common()
330 sugov_get_util(sg_cpu); in sugov_update_single_common()
331 sugov_iowait_apply(sg_cpu, time); in sugov_update_single_common()
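The matched lines trace the common fast path shared by both single-CPU callbacks: push any iowait boost, stamp last_update, let a grown deadline reservation bypass the rate limit, bail out if the rate limit still applies, then refresh the utilization snapshot and apply the boost. The sketch adds only the reconstructed parameter list and the two return statements, which are not in the match list.

static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
                                              u64 time, unsigned int flags)
{
        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu);

        if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
                return false;

        sugov_get_util(sg_cpu);
        sugov_iowait_apply(sg_cpu, time);

        return true;
}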
339 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); in sugov_update_single_freq() local
340 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_update_single_freq()
344 if (!sugov_update_single_common(sg_cpu, time, flags)) in sugov_update_single_freq()
347 next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max); in sugov_update_single_freq()
352 if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) { in sugov_update_single_freq()
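In the frequency-based single-CPU callback the matched lines show the shape of the decision: run the common update, compute the next frequency from util and max, but refuse to lower it while the CPU has had no idle time since the last update. The signature below is reconstructed, and the cached-frequency restore and the final submission to the driver are left as placeholders because they do not appear in the match list.

static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
                                     unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int next_f;

        if (!sugov_update_single_common(sg_cpu, time, flags))
                return;

        next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);

        /* Do not lower the frequency if the CPU has not been idle recently */
        if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
                next_f = sg_policy->next_freq;
                /* (cached raw frequency restore, not in the match list) */
        }

        /* (hand next_f to the cpufreq driver, not in the match list) */
}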
379 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); in sugov_update_single_perf() local
380 unsigned long prev_util = sg_cpu->util; in sugov_update_single_perf()
392 if (!sugov_update_single_common(sg_cpu, time, flags)) in sugov_update_single_perf()
399 if (sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util) in sugov_update_single_perf()
400 sg_cpu->util = prev_util; in sugov_update_single_perf()
402 cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl), in sugov_update_single_perf()
403 map_util_perf(sg_cpu->util), sg_cpu->max); in sugov_update_single_perf()
405 sg_cpu->sg_policy->last_freq_update_time = time; in sugov_update_single_perf()
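The perf variant follows the same pattern but talks to the driver in capacity units via cpufreq_driver_adjust_perf(), handing it the deadline bandwidth as the guaranteed level and sg_cpu->util as the target, and it keeps the previous utilization as a floor while the CPU is busy. The signature and the early fallback to the frequency path when frequency invariance is unavailable are reconstructed and should be treated as assumptions about this kernel version.

static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
                                     unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        unsigned long prev_util = sg_cpu->util;

        /* Reconstructed: without frequency invariance, use the frequency path */
        if (!arch_scale_freq_invariant()) {
                sugov_update_single_freq(hook, time, flags);
                return;
        }

        if (!sugov_update_single_common(sg_cpu, time, flags))
                return;

        /* Do not lower the performance level if the CPU has had no idle time */
        if (sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
                sg_cpu->util = prev_util;

        cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
                                   map_util_perf(sg_cpu->util), sg_cpu->max);

        sg_cpu->sg_policy->last_freq_update_time = time;
}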
408 static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time) in sugov_next_freq_shared() argument
410 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_next_freq_shared()
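For a shared policy only the entry of sugov_next_freq_shared() shows up in the match list, because the loop body works on the sibling CPUs through a differently named local rather than sg_cpu. The loop below is therefore a reconstruction from context and may differ in detail between kernel versions: each CPU in the policy is refreshed with sugov_get_util() and sugov_iowait_apply(), and the CPU with the largest utilization relative to its capacity decides the frequency.

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util = 0, max = 1;
        unsigned int j;

        /* Reconstructed: aggregate over every CPU sharing the policy */
        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);

                sugov_get_util(j_sg_cpu);
                sugov_iowait_apply(j_sg_cpu, time);

                /* Keep the CPU with the highest util/capacity ratio */
                if (j_sg_cpu->util * max > j_sg_cpu->max * util) {
                        util = j_sg_cpu->util;
                        max = j_sg_cpu->max;
                }
        }

        return get_next_freq(sg_policy, util, max);
}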
436 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); in sugov_update_shared() local
437 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_update_shared()
442 sugov_iowait_boost(sg_cpu, time, flags); in sugov_update_shared()
443 sg_cpu->last_update = time; in sugov_update_shared()
445 ignore_dl_rate_limit(sg_cpu); in sugov_update_shared()
448 next_f = sugov_next_freq_shared(sg_cpu, time); in sugov_update_shared()
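The shared-policy callback repeats the single-CPU preamble (boost, last_update, deadline rate-limit bypass) under policy-wide serialization and then asks sugov_next_freq_shared() for the frequency. The locking, the rate-limit check and the final submission to the driver are not in the match list, so they appear only as placeholders or reconstructed comments in the sketch below.

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int next_f;

        /* (take the policy-wide update lock, not in the match list) */

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu);

        /* Reconstructed: only recompute when the rate limit allows it */
        if (sugov_should_update_freq(sg_policy, time)) {
                next_f = sugov_next_freq_shared(sg_cpu, time);
                /* (hand next_f to the cpufreq driver, not in the match list) */
        }

        /* (release the policy-wide update lock) */
}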
768 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); in sugov_start() local
770 memset(sg_cpu, 0, sizeof(*sg_cpu)); in sugov_start()
771 sg_cpu->cpu = cpu; in sugov_start()
772 sg_cpu->sg_policy = sg_policy; in sugov_start()
783 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); in sugov_start() local
785 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu); in sugov_start()
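sugov_start() is where this per-CPU state is wired up: one pass over the policy's CPUs zeroes each sugov_cpu and records its CPU number and owning sugov_policy, and a second pass installs the update_util hook with whichever callback (uu) was chosen for the policy. The sketch keeps just those two loops; the sg_policy initialisation and the selection of uu are not in the match list and are only hinted at.

static int sugov_start(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
        unsigned int cpu;

        /* (sg_policy initialisation and choice of uu, not in the match list) */

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu = cpu;
                sg_cpu->sg_policy = sg_policy;
        }

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
        }

        return 0;
}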