Lines Matching refs:coupled
150 int n = dev->coupled->online_count; in cpuidle_coupled_parallel_barrier()
205 static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled) in cpuidle_coupled_set_ready() argument
207 atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts); in cpuidle_coupled_set_ready()
225 inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled) in cpuidle_coupled_set_not_ready() argument
230 all = coupled->online_count | (coupled->online_count << WAITING_BITS); in cpuidle_coupled_set_not_ready()
231 ret = atomic_add_unless(&coupled->ready_waiting_counts, in cpuidle_coupled_set_not_ready()
243 static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled) in cpuidle_coupled_no_cpus_ready() argument
245 int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS; in cpuidle_coupled_no_cpus_ready()
255 static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled) in cpuidle_coupled_cpus_ready() argument
257 int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS; in cpuidle_coupled_cpus_ready()
258 return r == coupled->online_count; in cpuidle_coupled_cpus_ready()
267 static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled) in cpuidle_coupled_cpus_waiting() argument
269 int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK; in cpuidle_coupled_cpus_waiting()
270 return w == coupled->online_count; in cpuidle_coupled_cpus_waiting()
279 static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled) in cpuidle_coupled_no_cpus_waiting() argument
281 int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK; in cpuidle_coupled_no_cpus_waiting()
293 struct cpuidle_coupled *coupled) in cpuidle_coupled_get_state() argument
305 for_each_cpu(i, &coupled->coupled_cpus) in cpuidle_coupled_get_state()
306 if (cpu_online(i) && coupled->requested_state[i] < state) in cpuidle_coupled_get_state()
307 state = coupled->requested_state[i]; in cpuidle_coupled_get_state()
347 struct cpuidle_coupled *coupled) in cpuidle_coupled_poke_others() argument
351 for_each_cpu(cpu, &coupled->coupled_cpus) in cpuidle_coupled_poke_others()
366 struct cpuidle_coupled *coupled, int next_state) in cpuidle_coupled_set_waiting() argument
368 coupled->requested_state[cpu] = next_state; in cpuidle_coupled_set_waiting()
374 return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK; in cpuidle_coupled_set_waiting()
385 struct cpuidle_coupled *coupled) in cpuidle_coupled_set_not_waiting() argument
393 atomic_dec(&coupled->ready_waiting_counts); in cpuidle_coupled_set_not_waiting()
395 coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE; in cpuidle_coupled_set_not_waiting()
407 static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled) in cpuidle_coupled_set_done() argument
409 cpuidle_coupled_set_not_waiting(cpu, coupled); in cpuidle_coupled_set_done()
410 atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts); in cpuidle_coupled_set_done()
439 static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled) in cpuidle_coupled_any_pokes_pending() argument
444 cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus); in cpuidle_coupled_any_pokes_pending()
473 struct cpuidle_coupled *coupled = dev->coupled; in cpuidle_enter_state_coupled() local
476 if (!coupled) in cpuidle_enter_state_coupled()
479 while (coupled->prevent) { in cpuidle_enter_state_coupled()
496 w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state); in cpuidle_enter_state_coupled()
504 if (w == coupled->online_count) { in cpuidle_enter_state_coupled()
506 cpuidle_coupled_poke_others(dev->cpu, coupled); in cpuidle_enter_state_coupled()
518 while (!cpuidle_coupled_cpus_waiting(coupled) || in cpuidle_enter_state_coupled()
524 cpuidle_coupled_set_not_waiting(dev->cpu, coupled); in cpuidle_enter_state_coupled()
528 if (coupled->prevent) { in cpuidle_enter_state_coupled()
529 cpuidle_coupled_set_not_waiting(dev->cpu, coupled); in cpuidle_enter_state_coupled()
540 cpuidle_coupled_set_not_waiting(dev->cpu, coupled); in cpuidle_enter_state_coupled()
559 cpuidle_coupled_set_ready(coupled); in cpuidle_enter_state_coupled()
560 while (!cpuidle_coupled_cpus_ready(coupled)) { in cpuidle_enter_state_coupled()
562 if (!cpuidle_coupled_cpus_waiting(coupled)) in cpuidle_enter_state_coupled()
563 if (!cpuidle_coupled_set_not_ready(coupled)) in cpuidle_enter_state_coupled()
584 if (cpuidle_coupled_any_pokes_pending(coupled)) { in cpuidle_enter_state_coupled()
585 cpuidle_coupled_set_done(dev->cpu, coupled); in cpuidle_enter_state_coupled()
587 cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier); in cpuidle_enter_state_coupled()
592 next_state = cpuidle_coupled_get_state(dev, coupled); in cpuidle_enter_state_coupled()
596 cpuidle_coupled_set_done(dev->cpu, coupled); in cpuidle_enter_state_coupled()
620 while (!cpuidle_coupled_no_cpus_ready(coupled)) in cpuidle_enter_state_coupled()
626 static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled) in cpuidle_coupled_update_online_cpus() argument
629 cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus); in cpuidle_coupled_update_online_cpus()
630 coupled->online_count = cpumask_weight(&cpus); in cpuidle_coupled_update_online_cpus()
646 struct cpuidle_coupled *coupled; in cpuidle_coupled_register_device() local
653 if (other_dev && other_dev->coupled) { in cpuidle_coupled_register_device()
654 coupled = other_dev->coupled; in cpuidle_coupled_register_device()
660 coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL); in cpuidle_coupled_register_device()
661 if (!coupled) in cpuidle_coupled_register_device()
664 coupled->coupled_cpus = dev->coupled_cpus; in cpuidle_coupled_register_device()
667 dev->coupled = coupled; in cpuidle_coupled_register_device()
668 if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus))) in cpuidle_coupled_register_device()
669 coupled->prevent++; in cpuidle_coupled_register_device()
671 cpuidle_coupled_update_online_cpus(coupled); in cpuidle_coupled_register_device()
673 coupled->refcnt++; in cpuidle_coupled_register_device()
692 struct cpuidle_coupled *coupled = dev->coupled; in cpuidle_coupled_unregister_device() local
697 if (--coupled->refcnt) in cpuidle_coupled_unregister_device()
698 kfree(coupled); in cpuidle_coupled_unregister_device()
699 dev->coupled = NULL; in cpuidle_coupled_unregister_device()
709 static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled) in cpuidle_coupled_prevent_idle() argument
714 coupled->prevent++; in cpuidle_coupled_prevent_idle()
715 cpuidle_coupled_poke_others(cpu, coupled); in cpuidle_coupled_prevent_idle()
717 while (!cpuidle_coupled_no_cpus_waiting(coupled)) in cpuidle_coupled_prevent_idle()
728 static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled) in cpuidle_coupled_allow_idle() argument
737 coupled->prevent--; in cpuidle_coupled_allow_idle()
739 cpuidle_coupled_poke_others(cpu, coupled); in cpuidle_coupled_allow_idle()
750 if (dev && dev->coupled) { in coupled_cpu_online()
751 cpuidle_coupled_update_online_cpus(dev->coupled); in coupled_cpu_online()
752 cpuidle_coupled_allow_idle(dev->coupled); in coupled_cpu_online()
766 if (dev && dev->coupled) in coupled_cpu_up_prepare()
767 cpuidle_coupled_prevent_idle(dev->coupled); in coupled_cpu_up_prepare()
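Most of the references above revolve around one piece of state, coupled->ready_waiting_counts: a single atomic that packs the number of "waiting" CPUs in its low bits and the number of "ready" CPUs in its high bits, each compared against coupled->online_count (lines 255-270). The user-space C sketch below is a minimal model of that packing, not the kernel code itself; the function names are simplified stand-ins for the listed helpers, and the values of WAITING_BITS, MAX_WAITING_CPUS, WAITING_MASK and online_count are assumptions made for illustration, since the listing does not show their definitions.

#include <stdatomic.h>
#include <stdio.h>

#define WAITING_BITS		16			/* assumed width of the waiting field */
#define MAX_WAITING_CPUS	(1 << WAITING_BITS)	/* adding this bumps the ready count by one */
#define WAITING_MASK		(MAX_WAITING_CPUS - 1)	/* low bits hold the waiting count */

static atomic_int ready_waiting_counts;		/* models coupled->ready_waiting_counts */
static int online_count = 4;			/* stand-in for coupled->online_count */

/* models cpuidle_coupled_set_waiting(): one more cpu is waiting, return the new waiting count */
static int set_waiting(void)
{
	return (atomic_fetch_add(&ready_waiting_counts, 1) + 1) & WAITING_MASK;
}

/* models cpuidle_coupled_set_ready(): a waiting cpu also becomes ready */
static void set_ready(void)
{
	atomic_fetch_add(&ready_waiting_counts, MAX_WAITING_CPUS);
}

/* models cpuidle_coupled_set_done(): drop out of both the ready and waiting counts */
static void set_done(void)
{
	atomic_fetch_sub(&ready_waiting_counts, 1);
	atomic_fetch_sub(&ready_waiting_counts, MAX_WAITING_CPUS);
}

/* models cpuidle_coupled_cpus_waiting(): every online cpu is waiting */
static int cpus_waiting(void)
{
	return (atomic_load(&ready_waiting_counts) & WAITING_MASK) == online_count;
}

/* models cpuidle_coupled_cpus_ready(): every online cpu is ready */
static int cpus_ready(void)
{
	return (atomic_load(&ready_waiting_counts) >> WAITING_BITS) == online_count;
}

int main(void)
{
	for (int cpu = 0; cpu < online_count; cpu++) {
		int w = set_waiting();
		printf("cpu %d waiting (count %d), all waiting: %d\n",
		       cpu, w, cpus_waiting());
	}

	for (int cpu = 0; cpu < online_count; cpu++) {
		set_ready();
		printf("cpu %d ready, all ready: %d\n", cpu, cpus_ready());
	}

	for (int cpu = 0; cpu < online_count; cpu++)
		set_done();

	printf("counter back to %d: no cpus waiting or ready\n",
	       atomic_load(&ready_waiting_counts));
	return 0;
}

Packing both counts into one word is also what lets cpuidle_coupled_set_not_ready() (lines 230-231 above) back out of the ready state atomically: the atomic_add_unless() there compares the whole counter against online_count | (online_count << WAITING_BITS), declining the update once every online CPU is both waiting and ready.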