Lines Matching refs:group

In group_init() (group is an argument):

182  static void group_init(struct psi_group *group)
187          seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
188          group->avg_next_update = sched_clock() + psi_period;
189          INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
190          mutex_init(&group->avgs_lock);
192          atomic_set(&group->poll_scheduled, 0);
193          mutex_init(&group->trigger_lock);
194          INIT_LIST_HEAD(&group->triggers);
195          memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
196          group->poll_states = 0;
197          group->poll_min_period = U32_MAX;
198          memset(group->polling_total, 0, sizeof(group->polling_total));
199          group->polling_next_update = ULLONG_MAX;
200          group->polling_until = 0;
201          rcu_assign_pointer(group->poll_kworker, NULL);
In get_recent_times() (group is an argument):

236  static void get_recent_times(struct psi_group *group, int cpu,
240          struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
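
Note: get_recent_times() snapshots each CPU's stall counters under the
seqcount that group_init() seeded above, retrying if a writer raced with
the read. A minimal sketch of that read side (the helper name is
illustrative; the retry loop mirrors the kernel pattern):

    /* Lockless snapshot of one CPU's PSI times; retries on writer races. */
    static void snapshot_times(struct psi_group_cpu *groupc,
                               u32 times[NR_PSI_STATES])
    {
            unsigned int seq;

            do {
                    seq = read_seqcount_begin(&groupc->seq);
                    memcpy(times, groupc->times, sizeof(u32) * NR_PSI_STATES);
            } while (read_seqcount_retry(&groupc->seq, seq));
    }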
In collect_percpu_times() (group is an argument):

301  static void collect_percpu_times(struct psi_group *group,
324                  get_recent_times(group, cpu, aggregator, times,
349                  group->total[aggregator][s] +=
In update_averages() (group is an argument):

356  static u64 update_averages(struct psi_group *group, u64 now)
364          expires = group->avg_next_update;
376          period = now - (group->avg_last_update + (missed_periods * psi_period));
377          group->avg_last_update = now;
382                  sample = group->total[PSI_AVGS][s] - group->avg_total[s];
402                  group->avg_total[s] += sample;
403                  calc_avgs(group->avg[s], missed_periods, sample, period);
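
Note: calc_avgs() feeds each sampled stall percentage into the same kind
of fixed-point exponential average the load average uses, producing the
avg10/avg60/avg300 figures. A userspace sketch of a single averaging
step, assuming an 11-bit fixed point like the kernel's FIXED_1; all
names here are illustrative:

    #include <stdint.h>

    #define FP_SHIFT 11
    #define FP_ONE   (1u << FP_SHIFT)   /* plays the role of FIXED_1 */

    /* decay ~= FP_ONE * exp(-sample_period / avg_window), precomputed.
     * pct_fp is the last period's stall percentage in fixed point. */
    static uint32_t ewma_step(uint32_t avg, uint32_t decay, uint32_t pct_fp)
    {
            return ((uint64_t)avg * decay +
                    (uint64_t)pct_fp * (FP_ONE - decay)) >> FP_SHIFT;
    }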
In psi_avgs_work() (group is a local):

412          struct psi_group *group;
418          group = container_of(dwork, struct psi_group, avgs_work);
420          mutex_lock(&group->avgs_lock);
424          collect_percpu_times(group, PSI_AVGS, &changed_states);
433          if (now >= group->avg_next_update)
434                  group->avg_next_update = update_averages(group, now);
438                          group->avg_next_update - now) + 1);
441          mutex_unlock(&group->avgs_lock);
In init_triggers() (group is an argument):

490  static void init_triggers(struct psi_group *group, u64 now)
494          list_for_each_entry(t, &group->triggers, node)
496                          group->total[PSI_POLL][t->state], 0);
497          memcpy(group->polling_total, group->total[PSI_POLL],
498                 sizeof(group->polling_total));
499          group->polling_next_update = now + group->poll_min_period;
In update_triggers() (group is an argument):

502  static u64 update_triggers(struct psi_group *group, u64 now)
506          u64 *total = group->total[PSI_POLL];
512          list_for_each_entry(t, &group->triggers, node) {
516                  if (group->polling_total[t->state] == total[t->state])
543          memcpy(group->polling_total, total,
544                 sizeof(group->polling_total));
546          return now + group->poll_min_period;
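
Note: for each trigger, update_triggers() compares the stall growth
inside that trigger's time window against its threshold, skipping states
whose totals have not moved since the last pass (line 516). A simplified
shape of that window bookkeeping; the kernel's window_update() also
carries part of the previous window's growth over, which is omitted
here, and all names are illustrative:

    #include <stdint.h>

    struct window {
            uint64_t start_time;    /* ns, start of current window */
            uint64_t start_value;   /* total stall time at window start */
            uint64_t size;          /* window length, ns */
    };

    /* Stall growth inside the current window; rolls the window forward
     * once 'now' passes its end. */
    static uint64_t window_growth(struct window *w, uint64_t now,
                                  uint64_t total)
    {
            if (now - w->start_time >= w->size) {
                    w->start_time = now;
                    w->start_value = total;
                    return 0;
            }
            return total - w->start_value;
    }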
In psi_schedule_poll_work() (group is an argument):

555  static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
560          if (atomic_cmpxchg(&group->poll_scheduled, 0, 1) != 0)
565          kworker = rcu_dereference(group->poll_kworker);
571          kthread_queue_delayed_work(kworker, &group->poll_work, delay);
573          atomic_set(&group->poll_scheduled, 0);
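
Note: the cmpxchg on line 560 lets exactly one context queue the poll
work; psi_poll_work() clears the flag when it runs (line 588 below), and
line 573 clears it again when no kworker was found. The same gate
rendered in portable C11 atomics, as a userspace sketch with
illustrative names:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int poll_scheduled;

    /* Returns true for exactly one caller until the worker resets it. */
    static bool try_schedule_once(void)
    {
            int expected = 0;

            return atomic_compare_exchange_strong(&poll_scheduled,
                                                  &expected, 1);
    }

    /* Called by the worker once it has started running. */
    static void poll_work_started(void)
    {
            atomic_store(&poll_scheduled, 0);
    }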
In psi_poll_work() (group is a local):

581          struct psi_group *group;
586          group = container_of(dwork, struct psi_group, poll_work);
588          atomic_set(&group->poll_scheduled, 0);
590          mutex_lock(&group->trigger_lock);
594          collect_percpu_times(group, PSI_POLL, &changed_states);
596          if (changed_states & group->poll_states) {
598                  if (now > group->polling_until)
599                          init_triggers(group, now);
606                  group->polling_until = now +
607                          group->poll_min_period * UPDATES_PER_WINDOW;
610          if (now > group->polling_until) {
611                  group->polling_next_update = ULLONG_MAX;
615          if (now >= group->polling_next_update)
616                  group->polling_next_update = update_triggers(group, now);
618          psi_schedule_poll_work(group,
619                  nsecs_to_jiffies(group->polling_next_update - now) + 1);
622          mutex_unlock(&group->trigger_lock);
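
Note: the fragments above omit intervening lines, so the control flow is
easy to misread. A hedged reconstruction of psi_poll_work()'s scheduling
decisions, stitched together from the listed lines (not verbatim kernel
code):

    if (changed_states & group->poll_states) {
            /* entering polling mode: rebase the trigger windows */
            if (now > group->polling_until)
                    init_triggers(group, now);
            /* keep polling for a grace period after the last change */
            group->polling_until = now +
                    group->poll_min_period * UPDATES_PER_WINDOW;
    }
    if (now > group->polling_until) {
            /* quiet long enough: stop rescheduling, go idle */
            group->polling_next_update = ULLONG_MAX;
            goto out;
    }
    if (now >= group->polling_next_update)
            group->polling_next_update = update_triggers(group, now);
    psi_schedule_poll_work(group,
            nsecs_to_jiffies(group->polling_next_update - now) + 1);
    out:
            mutex_unlock(&group->trigger_lock);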
In psi_group_change() (group is an argument):

671  static u32 psi_group_change(struct psi_group *group, int cpu,
679          groupc = per_cpu_ptr(group->pcpu, cpu);
In psi_task_change() (group is a local):

749          struct psi_group *group;
779          while ((group = iterate_groups(task, &iter))) {
780                  u32 state_mask = psi_group_change(group, cpu, clear, set);
782                  if (state_mask & group->poll_states)
783                          psi_schedule_poll_work(group, 1);
785                  if (wake_clock && !delayed_work_pending(&group->avgs_work))
786                          schedule_delayed_work(&group->avgs_work, PSI_FREQ);
In psi_memstall_tick() (group is a local):

792          struct psi_group *group;
795          while ((group = iterate_groups(task, &iter))) {
798                  groupc = per_cpu_ptr(group->pcpu, cpu);
In psi_show() (group is an argument):

939  int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
948          mutex_lock(&group->avgs_lock);
950          collect_percpu_times(group, PSI_AVGS, NULL);
951          if (now >= group->avg_next_update)
952                  group->avg_next_update = update_averages(group, now);
953          mutex_unlock(&group->avgs_lock);
961                  avg[w] = group->avg[res * 2 + full][w];
962          total = div_u64(group->total[PSI_AVGS][res * 2 + full],
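
Note: psi_show() backs the /proc/pressure/{cpu,memory,io} files (and
their cgroup equivalents), printing a "some" line, plus a "full" line
for memory and io, with avg10/avg60/avg300 percentages and the absolute
stall total in microseconds. A minimal userspace reader:

    #include <stdio.h>

    /* Dumps e.g. "some avg10=0.12 avg60=0.05 avg300=0.00 total=123456" */
    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/pressure/memory", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }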
In psi_trigger_create() (group is an argument):

1006  struct psi_trigger *psi_trigger_create(struct psi_group *group,
1039          t->group = group;
1050          mutex_lock(&group->trigger_lock);
1052          if (!rcu_access_pointer(group->poll_kworker)) {
1061                          mutex_unlock(&group->trigger_lock);
1065                  kthread_init_delayed_work(&group->poll_work,
1067                  rcu_assign_pointer(group->poll_kworker, kworker);
1070          list_add(&t->node, &group->triggers);
1071          group->poll_min_period = min(group->poll_min_period,
1073          group->nr_triggers[t->state]++;
1074          group->poll_states |= (1 << t->state);
1076          mutex_unlock(&group->trigger_lock);
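
Note: psi_trigger_create() is reached by writing "<some|full>
<threshold_us> <window_us>" to a pressure file; the first trigger on a
group also spawns the poll kworker (lines 1052-1067). The userspace
side, adapted from the example in Documentation/accounting/psi.rst
(150ms of "some" memory stall within a 1s window):

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char trig[] = "some 150000 1000000";
            struct pollfd fds;

            fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
            if (fds.fd < 0) {
                    perror("open /proc/pressure/memory");
                    return 1;
            }
            if (write(fds.fd, trig, strlen(trig) + 1) < 0) {
                    perror("write trigger");
                    return 1;
            }
            fds.events = POLLPRI;

            while (1) {
                    if (poll(&fds, 1, -1) < 0) {
                            perror("poll");
                            return 1;
                    }
                    if (fds.revents & POLLERR) {
                            fprintf(stderr, "event source gone\n");
                            return 1;
                    }
                    if (fds.revents & POLLPRI)
                            printf("memory pressure threshold crossed\n");
            }
    }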
In psi_trigger_destroy() (group is a local):

1084          struct psi_group *group = t->group;
1096          mutex_lock(&group->trigger_lock);
1103          group->nr_triggers[t->state]--;
1104          if (!group->nr_triggers[t->state])
1105                  group->poll_states &= ~(1 << t->state);
1107          list_for_each_entry(tmp, &group->triggers, node)
1110          group->poll_min_period = period;
1112          if (group->poll_states == 0) {
1113                  group->polling_until = 0;
1115                                  group->poll_kworker,
1116                                  lockdep_is_held(&group->trigger_lock));
1117                  rcu_assign_pointer(group->poll_kworker, NULL);
1121          mutex_unlock(&group->trigger_lock);
1140          kthread_cancel_delayed_work_sync(&group->poll_work);
1141          atomic_set(&group->poll_scheduled, 0);