Lines matching the identifier rtp in the Linux kernel's RCU Tasks implementation (kernel/rcu/tasks.h)
16 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
21 typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
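The two typedefs above are the per-flavor hook signatures: each RCU Tasks flavor hands the generic machinery its grace-period function and post-grace-period function through pointers of these types, and the generic code calls them as rtp->gp_func(rtp) and rtp->postgp_func(rtp) (lines 524 and 737 below). A minimal user-space sketch of that dispatch; demo_gp, demo_postgp, and the reduced struct rcu_tasks are illustrative, not the kernel's actual layout:

    #include <stdio.h>

    struct rcu_tasks; /* forward declaration; the typedefs need only a pointer */
    typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
    typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

    struct rcu_tasks { /* drastically reduced; the real struct has many more fields */
        const char *name;
        rcu_tasks_gp_func_t gp_func;  /* flavor-specific grace-period wait */
        postgp_func_t postgp_func;    /* flavor-specific post-GP cleanup */
    };

    static void demo_gp(struct rcu_tasks *rtp)     { printf("%s: wait out a grace period\n", rtp->name); }
    static void demo_postgp(struct rcu_tasks *rtp) { printf("%s: post-GP work\n", rtp->name); }

    int main(void)
    {
        struct rcu_tasks rt = { "demo", demo_gp, demo_postgp };

        rt.gp_func(&rt);      /* as rcu_tasks_one_gp() does on line 524 */
        rt.postgp_func(&rt);  /* as rcu_tasks_wait_gp() does on line 737 */
        return 0;
    }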
204 static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate) in set_tasks_gp_state() argument
206 rtp->gp_state = newstate; in set_tasks_gp_state()
207 rtp->gp_jiffies = jiffies; in set_tasks_gp_state()
212 static const char *tasks_gp_state_getname(struct rcu_tasks *rtp) in tasks_gp_state_getname() argument
214 int i = data_race(rtp->gp_state); // Let KCSAN detect update races in tasks_gp_state_getname()
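set_tasks_gp_state() timestamps each state change, and tasks_gp_state_getname() turns the numeric state into a name for the diagnostic dump further down (line 633). The state is read locklessly; per the comment on line 214, wrapping the read in data_race() excuses this access while still letting KCSAN detect racy updates elsewhere. A compilable sketch of the lookup; the state table here is truncated and illustrative:

    #include <stdio.h>

    enum { RTGS_INIT, RTGS_WAIT_CBS, RTGS_WAIT_GP, RTGS_NSTATES };

    static const char * const gp_state_names[] = {
        "RTGS_INIT", "RTGS_WAIT_CBS", "RTGS_WAIT_GP",
    };

    static int gp_state; /* written by the GP kthread, read for dumps */

    static const char *state_getname(void)
    {
        int i = gp_state; /* data_race(rtp->gp_state) in the kernel */

        if (i < 0 || i >= RTGS_NSTATES)
            return "???";
        return gp_state_names[i];
    }

    int main(void)
    {
        gp_state = RTGS_WAIT_GP;
        printf("%s\n", state_getname());
        return 0;
    }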
225 static void cblist_init_generic(struct rcu_tasks *rtp) in cblist_init_generic() argument
232 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); in cblist_init_generic()
247 WRITE_ONCE(rtp->percpu_enqueue_shift, shift); in cblist_init_generic()
248 WRITE_ONCE(rtp->percpu_dequeue_lim, lim); in cblist_init_generic()
249 smp_store_release(&rtp->percpu_enqueue_lim, lim); in cblist_init_generic()
251 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in cblist_init_generic()
261 rtpcp->rtpp = rtp; in cblist_init_generic()
266 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); in cblist_init_generic()
267 …ting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp-… in cblist_init_generic()
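cblist_init_generic() publishes an enqueue shift and limit so that nr_cpu_ids CPUs are funneled onto lim per-CPU callback queues, with CPU c enqueuing on queue c >> shift. The matched lines show only the publication (WRITE_ONCE()/smp_store_release() on lines 247-249); the computation below is a user-space model assuming an ilog2()-style rounding like the kernel's, with ilog2_u() as a stand-in helper:

    #include <stdio.h>

    static int ilog2_u(unsigned int v) /* floor(log2(v)), v > 0 */
    {
        int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        unsigned int nr_cpu_ids = 8, lim = 2;
        int shift = ilog2_u(nr_cpu_ids / lim);

        if (((nr_cpu_ids - 1) >> shift) >= lim)
            shift++; /* ensure the highest CPU still maps below lim */
        for (unsigned int cpu = 0; cpu < nr_cpu_ids; cpu++)
            printf("cpu %u -> queue %u\n", cpu, cpu >> shift);
        return 0;
    }

With nr_cpu_ids = 8 and lim = 2, shift comes out to 2, so CPUs 0-3 share queue 0 and CPUs 4-7 share queue 1.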
273 struct rcu_tasks *rtp; in call_rcu_tasks_iw_wakeup() local
276 rtp = rtpcp->rtpp; in call_rcu_tasks_iw_wakeup()
277 rcuwait_wake_up(&rtp->cbs_wait); in call_rcu_tasks_iw_wakeup()
282 struct rcu_tasks *rtp) in call_rcu_tasks_generic() argument
296 ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift); in call_rcu_tasks_generic()
298 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu); in call_rcu_tasks_generic()
307 READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids) in call_rcu_tasks_generic()
312 cblist_init_generic(rtp); in call_rcu_tasks_generic()
319 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); in call_rcu_tasks_generic()
320 if (rtp->percpu_enqueue_lim != nr_cpu_ids) { in call_rcu_tasks_generic()
321 WRITE_ONCE(rtp->percpu_enqueue_shift, 0); in call_rcu_tasks_generic()
322 WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids); in call_rcu_tasks_generic()
323 smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids); in call_rcu_tasks_generic()
324 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name); in call_rcu_tasks_generic()
326 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); in call_rcu_tasks_generic()
330 if (needwake && READ_ONCE(rtp->kthread_ptr)) in call_rcu_tasks_generic()
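call_rcu_tasks_generic() derives its target queue from the calling CPU and the enqueue shift (line 296). When enqueuing keeps running into trouble (in the kernel, repeated lock contention) while the limit is still below nr_cpu_ids (line 307), it takes the global lock and widens queuing to one queue per CPU (lines 320-324). A toy model of that escalation; the contention counter and threshold are simulated, not the kernel's:

    #include <stdio.h>

    int main(void)
    {
        unsigned int shift = 3;      /* initially: every CPU -> queue 0 */
        unsigned int contended = 0;

        for (int i = 0; i < 40; i++) {  /* pretend 40 contended enqueues */
            if (++contended > 32) {     /* threshold is illustrative */
                shift = 0;              /* per-CPU queuing: queue == cpu */
                printf("switching to per-CPU callback queuing\n");
                break;
            }
        }
        printf("cpu 5 now enqueues on queue %u\n", 5 >> shift);
        return 0;
    }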
337 struct rcu_tasks *rtp; in rcu_barrier_tasks_generic_cb() local
341 rtp = rtpcp->rtpp; in rcu_barrier_tasks_generic_cb()
342 if (atomic_dec_and_test(&rtp->barrier_q_count)) in rcu_barrier_tasks_generic_cb()
343 complete(&rtp->barrier_q_completion); in rcu_barrier_tasks_generic_cb()
348 static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp) in rcu_barrier_tasks_generic() argument
353 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq); in rcu_barrier_tasks_generic()
355 mutex_lock(&rtp->barrier_q_mutex); in rcu_barrier_tasks_generic()
356 if (rcu_seq_done(&rtp->barrier_q_seq, s)) { in rcu_barrier_tasks_generic()
358 mutex_unlock(&rtp->barrier_q_mutex); in rcu_barrier_tasks_generic()
361 rcu_seq_start(&rtp->barrier_q_seq); in rcu_barrier_tasks_generic()
362 init_completion(&rtp->barrier_q_completion); in rcu_barrier_tasks_generic()
363 atomic_set(&rtp->barrier_q_count, 2); in rcu_barrier_tasks_generic()
365 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim)) in rcu_barrier_tasks_generic()
367 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_barrier_tasks_generic()
371 atomic_inc(&rtp->barrier_q_count); in rcu_barrier_tasks_generic()
374 if (atomic_sub_and_test(2, &rtp->barrier_q_count)) in rcu_barrier_tasks_generic()
375 complete(&rtp->barrier_q_completion); in rcu_barrier_tasks_generic()
376 wait_for_completion(&rtp->barrier_q_completion); in rcu_barrier_tasks_generic()
377 rcu_seq_end(&rtp->barrier_q_seq); in rcu_barrier_tasks_generic()
378 mutex_unlock(&rtp->barrier_q_mutex); in rcu_barrier_tasks_generic()
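The barrier uses a count biased by 2: atomic_set(..., 2) on line 363 seeds the bias, each queue that receives a barrier callback adds one (line 371), each invoked callback subtracts one (line 342), and the final atomic_sub_and_test(2, ...) on line 374 drops the bias. The bias guarantees that early-finishing callbacks cannot complete the barrier while enqueuing is still in progress; whichever side brings the count to zero signals completion. A user-space model with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int barrier_q_count;

    static void barrier_cb(void) /* one per queue that had callbacks */
    {
        if (atomic_fetch_sub(&barrier_q_count, 1) == 1) /* dec_and_test */
            printf("last callback completes the barrier\n");
    }

    int main(void)
    {
        int nqueues = 3;

        atomic_store(&barrier_q_count, 2);              /* the bias */
        for (int i = 0; i < nqueues; i++)
            atomic_fetch_add(&barrier_q_count, 1);      /* enqueue a cb */
        for (int i = 0; i < nqueues; i++)
            barrier_cb();                               /* cbs may run early */
        if (atomic_fetch_sub(&barrier_q_count, 2) == 2) /* sub_and_test(2) */
            printf("enqueuer side completes the barrier\n");
        return 0;
    }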
383 static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) in rcu_tasks_need_gpcb() argument
392 for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) { in rcu_tasks_need_gpcb()
393 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_tasks_need_gpcb()
406 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); in rcu_tasks_need_gpcb()
407 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); in rcu_tasks_need_gpcb()
423 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); in rcu_tasks_need_gpcb()
424 if (rtp->percpu_enqueue_lim > 1) { in rcu_tasks_need_gpcb()
425 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids)); in rcu_tasks_need_gpcb()
426 smp_store_release(&rtp->percpu_enqueue_lim, 1); in rcu_tasks_need_gpcb()
427 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu(); in rcu_tasks_need_gpcb()
428 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name); in rcu_tasks_need_gpcb()
430 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); in rcu_tasks_need_gpcb()
433 poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) { in rcu_tasks_need_gpcb()
434 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); in rcu_tasks_need_gpcb()
435 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) { in rcu_tasks_need_gpcb()
436 WRITE_ONCE(rtp->percpu_dequeue_lim, 1); in rcu_tasks_need_gpcb()
437 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name); in rcu_tasks_need_gpcb()
439 for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) { in rcu_tasks_need_gpcb()
440 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_tasks_need_gpcb()
444 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); in rcu_tasks_need_gpcb()
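Besides advancing callbacks, rcu_tasks_need_gpcb() shrinks back to CPU-0 queuing in two phases when load drops: phase one narrows the enqueue limit and snapshots an RCU grace-period cookie (lines 426-427); phase two narrows the dequeue limit only after poll_state_synchronize_rcu() confirms that grace period has elapsed (lines 433-436), so no in-flight enqueuer can still be using the old limit. A sketch with the cookie machinery simulated by a bare counter:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long gp_seq; /* simulated RCU grace-period counter */

    static unsigned long get_state_sim(void)         { return gp_seq + 1; }
    static bool poll_state_sim(unsigned long cookie) { return gp_seq >= cookie; }

    int main(void)
    {
        unsigned int enqueue_lim = 8, dequeue_lim = 8;
        unsigned long cookie;

        /* Phase 1: stop new enqueues beyond queue 0. */
        enqueue_lim = 1;
        cookie = get_state_sim();
        printf("enqueue_lim=%u, waiting for a grace period\n", enqueue_lim);

        gp_seq++; /* a grace period elapses */

        /* Phase 2: old-limit enqueuers have drained; stop dequeuing too. */
        if (enqueue_lim < dequeue_lim && poll_state_sim(cookie)) {
            dequeue_lim = 1;
            printf("dequeue_lim=%u, switch to CPU-0 queuing complete\n", dequeue_lim);
        }
        return 0;
    }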
451 static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp) in rcu_tasks_invoke_cbs() argument
463 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) { in rcu_tasks_invoke_cbs()
464 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); in rcu_tasks_invoke_cbs()
467 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) { in rcu_tasks_invoke_cbs()
468 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); in rcu_tasks_invoke_cbs()
476 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); in rcu_tasks_invoke_cbs()
488 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); in rcu_tasks_invoke_cbs()
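Callback invocation fans out rather than scanning queues serially: the handler for queue cpu checks the dequeue limit twice (lines 463 and 467) because it kicks the next two queues, 2*cpu+1 and 2*cpu+2 in the kernel source, onto a workqueue before invoking its own callbacks, spreading invocation as a binary tree. Modeled below with recursion standing in for the kernel's queued work items:

    #include <stdio.h>

    static void invoke_cbs(int cpu, int lim)
    {
        int next = cpu * 2 + 1;

        if (next < lim) {
            invoke_cbs(next, lim);       /* the kernel queues work instead */
            if (next + 1 < lim)
                invoke_cbs(next + 1, lim);
        }
        printf("invoke callbacks for queue %d\n", cpu);
    }

    int main(void)
    {
        invoke_cbs(0, 8); /* rcu_tasks_one_gp() starts at queue 0 (line 530) */
        return 0;
    }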
495 struct rcu_tasks *rtp; in rcu_tasks_invoke_cbs_wq() local
498 rtp = rtpcp->rtpp; in rcu_tasks_invoke_cbs_wq()
499 rcu_tasks_invoke_cbs(rtp, rtpcp); in rcu_tasks_invoke_cbs_wq()
503 static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot) in rcu_tasks_one_gp() argument
507 mutex_lock(&rtp->tasks_gp_mutex); in rcu_tasks_one_gp()
513 set_tasks_gp_state(rtp, RTGS_WAIT_CBS); in rcu_tasks_one_gp()
514 rcuwait_wait_event(&rtp->cbs_wait, in rcu_tasks_one_gp()
515 (needgpcb = rcu_tasks_need_gpcb(rtp)), in rcu_tasks_one_gp()
521 set_tasks_gp_state(rtp, RTGS_WAIT_GP); in rcu_tasks_one_gp()
522 rtp->gp_start = jiffies; in rcu_tasks_one_gp()
523 rcu_seq_start(&rtp->tasks_gp_seq); in rcu_tasks_one_gp()
524 rtp->gp_func(rtp); in rcu_tasks_one_gp()
525 rcu_seq_end(&rtp->tasks_gp_seq); in rcu_tasks_one_gp()
529 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); in rcu_tasks_one_gp()
530 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0)); in rcu_tasks_one_gp()
531 mutex_unlock(&rtp->tasks_gp_mutex); in rcu_tasks_one_gp()
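rcu_tasks_one_gp() is the cycle driver: wait until some queue needs a grace period (RTGS_WAIT_CBS), bump the sequence counter around the flavor's gp_func (RTGS_WAIT_GP), then invoke ready callbacks starting at queue 0 (RTGS_INVOKE_CBS). rcu_tasks_kthread() below simply runs this cycle in a loop, sleeping rtp->gp_sleep between passes. A condensed model with stubbed phases; the rcu_seq_start()/rcu_seq_end() pair is simplified to bare increments:

    #include <stdio.h>

    static unsigned long tasks_gp_seq;

    static int need_gpcb(void)   { return 1; } /* pretend callbacks are waiting */
    static void gp_func(void)    { printf("flavor-specific grace-period wait\n"); }
    static void invoke_cbs(void) { printf("invoke ready callbacks\n"); }

    int main(void)
    {
        if (need_gpcb()) {     /* RTGS_WAIT_CBS */
            tasks_gp_seq++;    /* rcu_seq_start() */
            gp_func();         /* RTGS_WAIT_GP */
            tasks_gp_seq++;    /* rcu_seq_end() */
        }
        invoke_cbs();          /* RTGS_INVOKE_CBS */
        return 0;
    }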
537 struct rcu_tasks *rtp = arg; in rcu_tasks_kthread() local
541 WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start! in rcu_tasks_kthread()
552 rcu_tasks_one_gp(rtp, false); in rcu_tasks_kthread()
555 schedule_timeout_idle(rtp->gp_sleep); in rcu_tasks_kthread()
560 static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) in synchronize_rcu_tasks_generic() argument
567 if (READ_ONCE(rtp->kthread_ptr)) { in synchronize_rcu_tasks_generic()
568 wait_rcu_gp(rtp->call_func); in synchronize_rcu_tasks_generic()
571 rcu_tasks_one_gp(rtp, true); in synchronize_rcu_tasks_generic()
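The synchronous API has two paths: once kthread_ptr has been published (line 541), synchronize_rcu_tasks_generic() blocks on a callback-based wait via wait_rcu_gp(); before that, at mid-boot, it drives one grace period inline with rcu_tasks_one_gp(rtp, true). Sketched with stubs:

    #include <stdbool.h>
    #include <stdio.h>

    static bool kthread_ptr; /* set once the GP kthread has started */

    static void wait_gp_via_callback(void) { printf("queue a cb and wait for it\n"); }
    static void one_gp_inline(void)        { printf("mid-boot: drive the GP inline\n"); }

    int main(void)
    {
        if (kthread_ptr)
            wait_gp_via_callback(); /* wait_rcu_gp(rtp->call_func), line 568 */
        else
            one_gp_inline();        /* rcu_tasks_one_gp(rtp, true), line 571 */
        return 0;
    }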
575 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp) in rcu_spawn_tasks_kthread_generic() argument
579 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); in rcu_spawn_tasks_kthread_generic()
580 …%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name)) in rcu_spawn_tasks_kthread_generic()
618 static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) in show_rcu_tasks_generic_gp_kthread() argument
624 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in show_rcu_tasks_generic_gp_kthread()
632 rtp->kname, in show_rcu_tasks_generic_gp_kthread()
633 tasks_gp_state_getname(rtp), data_race(rtp->gp_state), in show_rcu_tasks_generic_gp_kthread()
634 jiffies - data_race(rtp->gp_jiffies), in show_rcu_tasks_generic_gp_kthread()
635 data_race(rcu_seq_current(&rtp->tasks_gp_seq)), in show_rcu_tasks_generic_gp_kthread()
636 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), in show_rcu_tasks_generic_gp_kthread()
637 ".k"[!!data_race(rtp->kthread_ptr)], in show_rcu_tasks_generic_gp_kthread()
652 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) in rcu_tasks_wait_gp() argument
664 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP); in rcu_tasks_wait_gp()
665 rtp->pregp_func(&holdouts); in rcu_tasks_wait_gp()
673 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST); in rcu_tasks_wait_gp()
674 if (rtp->pertask_func) { in rcu_tasks_wait_gp()
677 rtp->pertask_func(t, &holdouts); in rcu_tasks_wait_gp()
681 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST); in rcu_tasks_wait_gp()
682 rtp->postscan_func(&holdouts); in rcu_tasks_wait_gp()
694 fract = rtp->init_fract; in rcu_tasks_wait_gp()
703 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS); in rcu_tasks_wait_gp()
723 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS); in rcu_tasks_wait_gp()
724 rtp->holdouts_func(&holdouts, needreport, &firstreport); in rcu_tasks_wait_gp()
732 __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start); in rcu_tasks_wait_gp()
736 set_tasks_gp_state(rtp, RTGS_POST_GP); in rcu_tasks_wait_gp()
737 rtp->postgp_func(rtp); in rcu_tasks_wait_gp()
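rcu_tasks_wait_gp() is the shared grace-period template: run pregp_func, scan every task with pertask_func to build a holdout list, run postscan_func, then poll the holdouts with a sleep that starts at rtp->init_fract (line 694) and lengthens between scans, and finish with postgp_func. A toy model of the polling loop; the holdout list and per-flavor hooks are stubbed, and the back-off cap is illustrative:

    #include <stdio.h>

    int main(void)
    {
        int holdouts = 5; /* tasks still inside an RCU Tasks read side */
        int fract = 1;    /* initial poll interval, cf. rtp->init_fract */

        while (holdouts) {
            printf("sleep %d, rescan %d holdouts\n", fract, holdouts);
            if (fract < 8)
                fract++;   /* back off; the kernel's cap differs */
            holdouts--;    /* pretend one task leaves its read side */
        }
        printf("post-GP work via postgp_func\n");
        return 0;
    }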
884 static void rcu_tasks_postgp(struct rcu_tasks *rtp) in rcu_tasks_postgp() argument
1037 static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp) in rcu_tasks_rude_wait_gp() argument
1042 rtp->n_ipis += cpumask_weight(cpu_online_mask); in rcu_tasks_rude_wait_gp()
1642 static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) in rcu_tasks_trace_postgp() argument
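The flavor-specific hooks close out the matches: rcu_tasks_postgp() and rcu_tasks_trace_postgp() are the postgp_func implementations for the plain and Trace flavors, while rcu_tasks_rude_wait_gp() implements the Rude flavor's grace period by forcing a context switch on every online CPU, adding the online-CPU count to n_ipis via cpumask_weight() (line 1042). That weight is a population count over the mask, modeled here:

    #include <stdio.h>

    static int mask_weight(unsigned long mask) /* cpumask_weight() analogue */
    {
        int w = 0;

        while (mask) {
            mask &= mask - 1; /* clear the lowest set bit */
            w++;
        }
        return w;
    }

    int main(void)
    {
        unsigned long cpu_online_mask = 0xb; /* CPUs 0, 1, and 3 online */
        unsigned long n_ipis = 0;

        n_ipis += mask_weight(cpu_online_mask); /* one IPI per online CPU */
        printf("n_ipis=%lu\n", n_ipis);
        return 0;
    }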