Lines matching references to sg_cpu in kernel/sched/cpufreq_schedutil.c (the schedutil cpufreq governor); each entry gives the source line number, the matched code, the enclosing function, and whether sg_cpu is that function's argument or a local variable.

157 static void sugov_get_util(struct sugov_cpu *sg_cpu)  in sugov_get_util()  argument
159 struct rq *rq = cpu_rq(sg_cpu->cpu); in sugov_get_util()
161 sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu); in sugov_get_util()
162 sg_cpu->bw_dl = cpu_bw_dl(rq); in sugov_get_util()
163 sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu), in sugov_get_util()
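Lines 157-163 above take one consistent snapshot of the CPU in its per-CPU sugov_cpu struct: the capacity, the reserved deadline bandwidth, and the effective utilization (the call on line 163 continues on the next source line, which does not itself match sg_cpu and so is not listed). The sketch below is a minimal user-space model of that snapshot, not the kernel code: effective_util() is a deliberate simplification of effective_cpu_util() (the real helper also folds in RT, IRQ and uclamp contributions), and every name outside the three cached fields is hypothetical.

#include <stdio.h>

/* Mirrors the three fields cached by sugov_get_util(). */
struct cpu_snapshot {
	unsigned long util;   /* effective utilization, 0..max */
	unsigned long max;    /* CPU capacity                  */
	unsigned long bw_dl;  /* reserved deadline bandwidth   */
};

/* Simplified stand-in for effective_cpu_util(): CFS + DL, capped at capacity. */
static unsigned long effective_util(unsigned long cfs, unsigned long dl,
				    unsigned long max)
{
	unsigned long sum = cfs + dl;

	return sum < max ? sum : max;
}

static void snapshot_cpu(struct cpu_snapshot *s, unsigned long cfs_util,
			 unsigned long dl_bw, unsigned long capacity)
{
	s->max   = capacity;
	s->bw_dl = dl_bw;
	s->util  = effective_util(cfs_util, dl_bw, capacity);
}

int main(void)
{
	struct cpu_snapshot s;

	snapshot_cpu(&s, 300, 100, 1024);
	printf("util=%lu max=%lu bw_dl=%lu\n", s.util, s.max, s.bw_dl);
	return 0;
}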
178 static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time, in sugov_iowait_reset() argument
181 s64 delta_ns = time - sg_cpu->last_update; in sugov_iowait_reset()
187 sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0; in sugov_iowait_reset()
188 sg_cpu->iowait_boost_pending = set_iowait_boost; in sugov_iowait_reset()
207 static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, in sugov_iowait_boost() argument
213 if (sg_cpu->iowait_boost && in sugov_iowait_boost()
214 sugov_iowait_reset(sg_cpu, time, set_iowait_boost)) in sugov_iowait_boost()
222 if (sg_cpu->iowait_boost_pending) in sugov_iowait_boost()
224 sg_cpu->iowait_boost_pending = true; in sugov_iowait_boost()
227 if (sg_cpu->iowait_boost) { in sugov_iowait_boost()
228 sg_cpu->iowait_boost = in sugov_iowait_boost()
229 min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE); in sugov_iowait_boost()
234 sg_cpu->iowait_boost = IOWAIT_BOOST_MIN; in sugov_iowait_boost()
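Lines 178-234 are the build-up side of the iowait boost: if the CPU has been idle for more than one tick the boost is reset by sugov_iowait_reset(), otherwise every wakeup from I/O doubles it, starting at IOWAIT_BOOST_MIN and saturating at SCHED_CAPACITY_SCALE, and at most once per update (the iowait_boost_pending flag). Below is a small self-contained model of both helpers; a SCHED_CAPACITY_SCALE of 1024 and an IOWAIT_BOOST_MIN of one eighth of it match the kernel's definitions, while the 1 ms TICK_NSEC is an assumption (it depends on CONFIG_HZ).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)
#define TICK_NSEC		1000000LL	/* assumes a 1000 Hz tick */

struct boost_state {
	unsigned long iowait_boost;
	bool iowait_boost_pending;
	uint64_t last_update;
};

/* Reset the boost if the CPU slept for more than one tick (sugov_iowait_reset). */
static bool iowait_reset(struct boost_state *b, uint64_t now, bool set_boost)
{
	int64_t delta_ns = now - b->last_update;

	if (delta_ns <= TICK_NSEC)
		return false;

	b->iowait_boost = set_boost ? IOWAIT_BOOST_MIN : 0;
	b->iowait_boost_pending = set_boost;
	return true;
}

/* Double the boost on each I/O wakeup, capped at SCHED_CAPACITY_SCALE. */
static void iowait_boost(struct boost_state *b, uint64_t now, bool io_wakeup)
{
	if (b->iowait_boost && iowait_reset(b, now, io_wakeup))
		return;

	if (!io_wakeup || b->iowait_boost_pending)
		return;

	b->iowait_boost_pending = true;

	if (b->iowait_boost) {
		b->iowait_boost <<= 1;
		if (b->iowait_boost > SCHED_CAPACITY_SCALE)
			b->iowait_boost = SCHED_CAPACITY_SCALE;
	} else {
		b->iowait_boost = IOWAIT_BOOST_MIN;
	}
}

int main(void)
{
	struct boost_state b = { 0, false, 0 };
	uint64_t t = 0;

	for (int i = 0; i < 5; i++) {
		t += 500000;			/* wakeups half a tick apart    */
		iowait_boost(&b, t, true);
		b.iowait_boost_pending = false;	/* consumed by the apply step   */
		b.last_update = t;
		printf("boost after wakeup %d: %lu\n", i + 1, b.iowait_boost);
	}
	return 0;
}

Running it shows the boost climbing 128, 256, 512, 1024 and then holding at the cap.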
254 static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time) in sugov_iowait_apply() argument
259 if (!sg_cpu->iowait_boost) in sugov_iowait_apply()
263 if (sugov_iowait_reset(sg_cpu, time, false)) in sugov_iowait_apply()
266 if (!sg_cpu->iowait_boost_pending) { in sugov_iowait_apply()
270 sg_cpu->iowait_boost >>= 1; in sugov_iowait_apply()
271 if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) { in sugov_iowait_apply()
272 sg_cpu->iowait_boost = 0; in sugov_iowait_apply()
277 sg_cpu->iowait_boost_pending = false; in sugov_iowait_apply()
283 boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT; in sugov_iowait_apply()
284 boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL); in sugov_iowait_apply()
285 if (sg_cpu->util < boost) in sugov_iowait_apply()
286 sg_cpu->util = boost; in sugov_iowait_apply()
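Lines 254-286 are the consumption side, run on every frequency update: if no new I/O wakeup has been accounted since the last update the boost is halved, and once it falls below IOWAIT_BOOST_MIN it is cleared; whatever survives is scaled by the CPU capacity and used as a floor under sg_cpu->util. A minimal model of that decay-and-apply step, with the same assumed constants as above and without the uclamp clamping that uclamp_rq_util_with() performs on line 284:

#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

struct cpu_state {
	unsigned long util;		/* current effective utilization */
	unsigned long max;		/* CPU capacity                  */
	unsigned long iowait_boost;
	bool iowait_boost_pending;
};

/* Decay the boost and raise util to the boosted floor (sugov_iowait_apply). */
static void iowait_apply(struct cpu_state *c)
{
	unsigned long boost;

	if (!c->iowait_boost)
		return;

	/* No fresh I/O wakeup since the last update: decay by half. */
	if (!c->iowait_boost_pending) {
		c->iowait_boost >>= 1;
		if (c->iowait_boost < IOWAIT_BOOST_MIN) {
			c->iowait_boost = 0;
			return;
		}
	}
	c->iowait_boost_pending = false;

	/* Scale the boost by capacity and use it as a floor under util. */
	boost = (c->iowait_boost * c->max) >> SCHED_CAPACITY_SHIFT;
	if (c->util < boost)
		c->util = boost;
}

int main(void)
{
	struct cpu_state c = { .util = 100, .max = 1024,
			       .iowait_boost = 512, .iowait_boost_pending = false };

	for (int i = 0; i < 4; i++) {
		iowait_apply(&c);
		printf("step %d: boost=%lu util=%lu\n", i, c.iowait_boost, c.util);
	}
	return 0;
}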
290 static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) in sugov_cpu_is_busy() argument
292 unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu); in sugov_cpu_is_busy()
293 bool ret = idle_calls == sg_cpu->saved_idle_calls; in sugov_cpu_is_busy()
295 sg_cpu->saved_idle_calls = idle_calls; in sugov_cpu_is_busy()
299 static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } in sugov_cpu_is_busy() argument
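Lines 290-299 detect a continuously busy CPU: the nohz idle-call counter is sampled and compared with the value saved on the previous update, and if it has not moved the CPU never entered the idle loop in between; without CONFIG_NO_HZ_COMMON the stub on line 299 always reports not busy. A stand-alone model of that comparison, with a plain counter standing in for tick_nohz_get_idle_calls_cpu():

#include <stdbool.h>
#include <stdio.h>

struct busy_state {
	unsigned long saved_idle_calls;
};

/* Fake stand-in for tick_nohz_get_idle_calls_cpu(). */
static unsigned long fake_idle_calls;

/* Busy means: no new entries into the idle loop since the last check. */
static bool cpu_is_busy(struct busy_state *s)
{
	unsigned long idle_calls = fake_idle_calls;
	bool busy = (idle_calls == s->saved_idle_calls);

	s->saved_idle_calls = idle_calls;
	return busy;
}

int main(void)
{
	struct busy_state s = { 0 };

	cpu_is_busy(&s);			/* establish the baseline     */
	printf("busy: %d\n", cpu_is_busy(&s));	/* 1: counter unchanged       */
	fake_idle_calls++;			/* the CPU went idle once     */
	printf("busy: %d\n", cpu_is_busy(&s));	/* 0: counter moved           */
	return 0;
}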
306 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu) in ignore_dl_rate_limit() argument
308 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) in ignore_dl_rate_limit()
309 sg_cpu->sg_policy->limits_changed = true; in ignore_dl_rate_limit()
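Lines 306-309: if the deadline bandwidth currently reserved on the CPU exceeds the value cached in sg_cpu->bw_dl at the last update, the policy's limits_changed flag is set so the next update bypasses the normal rate limit; newly admitted deadline bandwidth should take effect immediately rather than after the rate-limit window expires. A compact model of the check (the struct layout here is hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct policy_state {
	bool limits_changed;		/* forces the next freq evaluation      */
};

struct cpu_state {
	unsigned long bw_dl;		/* DL bandwidth seen at the last update */
	struct policy_state *policy;
};

/* New DL bandwidth admitted since the last update: do not rate-limit it. */
static void ignore_dl_rate_limit(struct cpu_state *c, unsigned long cur_bw_dl)
{
	if (cur_bw_dl > c->bw_dl)
		c->policy->limits_changed = true;
}

int main(void)
{
	struct policy_state p = { false };
	struct cpu_state c = { .bw_dl = 100, .policy = &p };

	ignore_dl_rate_limit(&c, 100);
	printf("limits_changed: %d\n", p.limits_changed);	/* 0 */
	ignore_dl_rate_limit(&c, 200);
	printf("limits_changed: %d\n", p.limits_changed);	/* 1 */
	return 0;
}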
312 static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu, in sugov_update_single_common() argument
315 sugov_iowait_boost(sg_cpu, time, flags); in sugov_update_single_common()
316 sg_cpu->last_update = time; in sugov_update_single_common()
318 ignore_dl_rate_limit(sg_cpu); in sugov_update_single_common()
320 if (!sugov_should_update_freq(sg_cpu->sg_policy, time)) in sugov_update_single_common()
323 sugov_get_util(sg_cpu); in sugov_update_single_common()
324 sugov_iowait_apply(sg_cpu, time); in sugov_update_single_common()
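Lines 312-324 are the preamble shared by both single-CPU update paths: account the iowait boost for this event, stamp last_update, let new DL bandwidth bypass the rate limit, bail out if the rate limit still blocks an update, and only then take the utilization snapshot and fold the boost in. Note that the boost bookkeeping runs even when the rate limit suppresses the frequency change, so wakeups inside one rate-limit window still ramp the boost. The sketch below reproduces only that control flow; every helper is a hypothetical stub and the 2 ms rate limit is an assumed value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stubs standing in for the helpers shown in the listing. */
static void account_iowait_boost(uint64_t time, unsigned int flags)
{
	(void)time; (void)flags;
	printf("boost accounted\n");
}
static void check_dl_rate_limit(void) { }
static void take_util_snapshot(void) { printf("snapshot taken\n"); }
static void apply_iowait_boost(uint64_t time) { (void)time; }

static bool should_update_freq(uint64_t time)
{
	static uint64_t last;
	const uint64_t rate_limit_ns = 2000000;	/* assumed 2 ms rate limit */

	if (time - last < rate_limit_ns)
		return false;
	last = time;
	return true;
}

/* Mirrors the ordering in sugov_update_single_common(). */
static bool update_single_common(uint64_t time, unsigned int flags)
{
	account_iowait_boost(time, flags);	/* always, even if rate-limited */
	check_dl_rate_limit();

	if (!should_update_freq(time))
		return false;			/* too soon: no freq change     */

	take_util_snapshot();
	apply_iowait_boost(time);
	return true;
}

int main(void)
{
	printf("update at t=5ms: %d\n", update_single_common(5000000, 0));
	printf("update at t=6ms: %d\n", update_single_common(6000000, 0));
	printf("update at t=8ms: %d\n", update_single_common(8000000, 0));
	return 0;
}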
332 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); in sugov_update_single_freq() local
333 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_update_single_freq()
337 if (!sugov_update_single_common(sg_cpu, time, flags)) in sugov_update_single_freq()
340 next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max); in sugov_update_single_freq()
347 if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) && in sugov_update_single_freq()
348 sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) { in sugov_update_single_freq()
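Lines 332-348: the per-CPU frequency path turns the boosted utilization into a target frequency with get_next_freq(), which applies the kernel's 25% headroom (roughly next_freq = 1.25 * max_freq * util / max via map_util_freq()), and then refuses to lower the frequency if the CPU has had no idle time since the last update and the runqueue is not capped by uclamp. The sketch below reproduces that mapping and the busy-CPU check; the 2 GHz maximum frequency and the load figures are hypothetical, and the real code resolves the result through the cpufreq frequency table rather than using it raw:

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified util -> frequency mapping with the kernel's 25% headroom:
 * next_freq = 1.25 * max_freq * util / max.
 */
static unsigned int next_freq(unsigned int max_freq, unsigned long util,
			      unsigned long max)
{
	return (unsigned int)(((unsigned long long)(max_freq + (max_freq >> 2)) * util) / max);
}

struct cpu_state {
	unsigned long util;
	unsigned long max;
	unsigned int cur_freq;
	bool busy;		/* no idle time since the last update */
	bool uclamp_capped;	/* runqueue capped by uclamp          */
};

static unsigned int pick_freq(struct cpu_state *c)
{
	unsigned int f = next_freq(2000000 /* kHz, hypothetical max */, c->util, c->max);

	/* Keep a busy, uncapped CPU at its current request instead of slowing it down. */
	if (!c->uclamp_capped && c->busy && f < c->cur_freq)
		f = c->cur_freq;

	return f;
}

int main(void)
{
	struct cpu_state c = { .util = 512, .max = 1024, .cur_freq = 1800000,
			       .busy = true, .uclamp_capped = false };

	printf("requested: %u kHz\n", pick_freq(&c));	/* stays at 1800000  */
	c.busy = false;
	printf("requested: %u kHz\n", pick_freq(&c));	/* drops to 1250000  */
	return 0;
}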
375 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); in sugov_update_single_perf() local
376 unsigned long prev_util = sg_cpu->util; in sugov_update_single_perf()
388 if (!sugov_update_single_common(sg_cpu, time, flags)) in sugov_update_single_perf()
397 if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) && in sugov_update_single_perf()
398 sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util) in sugov_update_single_perf()
399 sg_cpu->util = prev_util; in sugov_update_single_perf()
401 cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl), in sugov_update_single_perf()
402 map_util_perf(sg_cpu->util), sg_cpu->max); in sugov_update_single_perf()
404 sg_cpu->sg_policy->last_freq_update_time = time; in sugov_update_single_perf()
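Lines 375-404: on drivers that implement the direct adjust-perf interface the governor skips frequency selection and hands the hardware three capacity-scaled numbers: a minimum derived from the DL bandwidth, a target derived from the effective utilization, and the capacity itself. map_util_perf() adds the same 25% headroom as the frequency path, and the same busy-CPU heuristic keeps util from dropping below its previous value. A minimal model, with a printf standing in for cpufreq_driver_adjust_perf():

#include <stdbool.h>
#include <stdio.h>

/* Same 25% headroom the kernel applies in map_util_perf(). */
static unsigned long map_util_perf(unsigned long util)
{
	return util + (util >> 2);
}

/* Stand-in for cpufreq_driver_adjust_perf(): just show what would be requested. */
static void adjust_perf(unsigned long min_perf, unsigned long target_perf,
			unsigned long capacity)
{
	printf("min=%lu target=%lu capacity=%lu\n", min_perf, target_perf, capacity);
}

int main(void)
{
	unsigned long util = 400, prev_util = 600, bw_dl = 100, max = 1024;
	bool busy = true, uclamp_capped = false;

	/* Busy and not capped: do not let the request drop below the previous one. */
	if (!uclamp_capped && busy && util < prev_util)
		util = prev_util;

	adjust_perf(map_util_perf(bw_dl), map_util_perf(util), max);
	return 0;
}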
407 static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time) in sugov_next_freq_shared() argument
409 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_next_freq_shared()
435 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); in sugov_update_shared() local
436 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_update_shared()
441 sugov_iowait_boost(sg_cpu, time, flags); in sugov_update_shared()
442 sg_cpu->last_update = time; in sugov_update_shared()
444 ignore_dl_rate_limit(sg_cpu); in sugov_update_shared()
447 next_f = sugov_next_freq_shared(sg_cpu, time); in sugov_update_shared()
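Lines 407-447: when several CPUs share one frequency domain, sugov_update_shared() takes the policy lock, accounts the boost for the CPU that raised the event, and sugov_next_freq_shared() then refreshes utilization and boost for every CPU in the policy and sizes the domain for the most demanding one, i.e. the highest util/max ratio, compared by cross-multiplication to avoid a division. A compact model of that aggregation (the per-CPU load values are made up):

#include <stdio.h>

struct cpu_load {
	unsigned long util;	/* effective utilization after boost */
	unsigned long max;	/* CPU capacity                      */
};

/*
 * Pick the most demanding CPU in the policy: the one with the highest
 * util/max ratio, compared by cross-multiplication as in
 * sugov_next_freq_shared().
 */
static struct cpu_load pick_busiest(const struct cpu_load *cpus, int n)
{
	struct cpu_load best = { 0, 1 };

	for (int i = 0; i < n; i++) {
		if (cpus[i].util * best.max > cpus[i].max * best.util)
			best = cpus[i];
	}
	return best;
}

int main(void)
{
	/* Hypothetical per-CPU loads; differing capacities just show why max is carried. */
	struct cpu_load cpus[] = {
		{ .util = 200, .max = 512 },	/* ~39% loaded */
		{ .util = 300, .max = 512 },	/* ~59% loaded */
		{ .util = 500, .max = 1024 },	/* ~49% loaded */
	};
	struct cpu_load b = pick_busiest(cpus, 3);

	printf("size the domain for util=%lu max=%lu\n", b.util, b.max);
	return 0;
}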
767 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); in sugov_start() local
769 memset(sg_cpu, 0, sizeof(*sg_cpu)); in sugov_start()
770 sg_cpu->cpu = cpu; in sugov_start()
771 sg_cpu->sg_policy = sg_policy; in sugov_start()
782 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); in sugov_start() local
784 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu); in sugov_start()
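Lines 767-784: at governor start each per-CPU sugov_cpu is zeroed, stamped with its CPU number and a back-pointer to the shared sugov_policy, and a second loop then registers the update_util hook through which the scheduler calls back into the governor (one of the sugov_update_single_* variants or sugov_update_shared). A user-space model of that two-pass setup, with an ordinary function pointer standing in for cpufreq_add_update_util_hook() and all *_model names hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_CPUS 4

struct sugov_policy_model { unsigned int next_freq; };

struct sugov_cpu_model {
	int cpu;
	struct sugov_policy_model *sg_policy;
	void (*update_util)(struct sugov_cpu_model *sg_cpu, uint64_t time);
};

static struct sugov_cpu_model per_cpu_sugov[NR_CPUS];

/* Stand-in for the scheduler-side callback (e.g. sugov_update_single_freq). */
static void update_cb(struct sugov_cpu_model *sg_cpu, uint64_t time)
{
	printf("update for CPU %d at t=%llu\n", sg_cpu->cpu,
	       (unsigned long long)time);
}

static void governor_start(struct sugov_policy_model *sg_policy,
			   const int *policy_cpus, int n)
{
	/* Pass 1: reset per-CPU state and link it to the shared policy. */
	for (int i = 0; i < n; i++) {
		struct sugov_cpu_model *sg_cpu = &per_cpu_sugov[policy_cpus[i]];

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu = policy_cpus[i];
		sg_cpu->sg_policy = sg_policy;
	}

	/* Pass 2: install the callback only once all per-CPU state is initialized. */
	for (int i = 0; i < n; i++)
		per_cpu_sugov[policy_cpus[i]].update_util = update_cb;
}

int main(void)
{
	struct sugov_policy_model policy = { 0 };
	int cpus[] = { 0, 1 };

	governor_start(&policy, cpus, 2);
	per_cpu_sugov[1].update_util(&per_cpu_sugov[1], 12345);
	return 0;
}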