Lines Matching +full:boost +full:- +full:bypass
1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4 * Internal non-public definitions that provide either classic
31 pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n", in rcu_bootup_announce_oddness()
36 pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n"); in rcu_bootup_announce_oddness()
40 pr_info("\tRCU strict (and thus non-scalable) grace periods enabled.\n"); in rcu_bootup_announce_oddness()
42 pr_info("\tFour(or more)-level hierarchy is enabled.\n"); in rcu_bootup_announce_oddness()
44 pr_info("\tBuild-time adjustment of leaf fanout to %d.\n", in rcu_bootup_announce_oddness()
47 pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", in rcu_bootup_announce_oddness()
56 pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit); in rcu_bootup_announce_oddness()
58 pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark); in rcu_bootup_announce_oddness()
60 pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark); in rcu_bootup_announce_oddness()
62 pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld); in rcu_bootup_announce_oddness()
64 pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs); in rcu_bootup_announce_oddness()
66 pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs); in rcu_bootup_announce_oddness()
68 pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs); in rcu_bootup_announce_oddness()
70 pr_info("\tKick kthreads if too-long grace period.\n"); in rcu_bootup_announce_oddness()
72 pr_info("\tRCU callback double-/use-after-free debug enabled.\n"); in rcu_bootup_announce_oddness()
74 pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay); in rcu_bootup_announce_oddness()
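The boot-time adjustments reported above are made through rcutree module parameters on the kernel command line; a hedged example, with parameter names inferred from the variables printed above rather than verified against this exact tree:

	rcutree.blimit=20 rcutree.qhimark=20000 rcutree.qlowmark=200 rcutree.jiffies_till_first_fqs=3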
107 * Queues a task preempted within an RCU-preempt read-side critical
108 * section into the appropriate location within the ->blkd_tasks list,
110 * periods. The ->gp_tasks pointer indicates which element the normal
111 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
114 * ->blkd_tasks list, it also waits on all subsequent elements. Thus,
129 * their RCU read-side critical sections. At that point, the ->gp_tasks
130 * pointer will equal the ->exp_tasks pointer, at which point the end of
135 __releases(rnp->lock) /* But leaves interrupts disabled. */ in rcu_preempt_ctxt_queue()
137 int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) + in rcu_preempt_ctxt_queue()
138 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) + in rcu_preempt_ctxt_queue()
139 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) + in rcu_preempt_ctxt_queue()
140 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0); in rcu_preempt_ctxt_queue()
144 WARN_ON_ONCE(rdp->mynode != rnp); in rcu_preempt_ctxt_queue()
147 WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask & in rcu_preempt_ctxt_queue()
148 rdp->grpmask); in rcu_preempt_ctxt_queue()
152 * this could be an if-statement. In practice, when I tried in rcu_preempt_ctxt_queue()
164 * GP but not blocking the already-waiting expedited GP. in rcu_preempt_ctxt_queue()
166 * blocking the already-waiting GPs. in rcu_preempt_ctxt_queue()
168 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
186 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
199 list_add(&t->rcu_node_entry, rnp->exp_tasks); in rcu_preempt_ctxt_queue()
210 list_add(&t->rcu_node_entry, rnp->gp_tasks); in rcu_preempt_ctxt_queue()
222 * block either grace period, update the ->gp_tasks and/or in rcu_preempt_ctxt_queue()
223 * ->exp_tasks pointers, respectively, to reference the newly in rcu_preempt_ctxt_queue()
226 if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) { in rcu_preempt_ctxt_queue()
227 WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
228 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq); in rcu_preempt_ctxt_queue()
230 if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD)) in rcu_preempt_ctxt_queue()
231 WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
233 !(rnp->qsmask & rdp->grpmask)); in rcu_preempt_ctxt_queue()
235 !(rnp->expmask & rdp->grpmask)); in rcu_preempt_ctxt_queue()
244 if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs) in rcu_preempt_ctxt_queue()
247 WARN_ON_ONCE(rdp->exp_deferred_qs); in rcu_preempt_ctxt_queue()
251 * Record a preemptible-RCU quiescent state for the specified CPU.
254 * grace period need not wait on any RCU read-side critical section that
256 * in an RCU read-side critical section, it has already added itself to
257 * some leaf rcu_node structure's ->blkd_tasks list. In addition to the
259 * in an RCU read-side critical section.
272 WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false); in rcu_qs()
278 * context-switched away from. If this task is in an RCU read-side
282 * RCU read-side critical section. Therefore, the current grace period
285 * rnp->gp_tasks becomes NULL.
299 !t->rcu_read_unlock_special.b.blocked) { in rcu_note_context_switch()
301 /* Possibly blocking in an RCU read-side critical section. */ in rcu_note_context_switch()
302 rnp = rdp->mynode; in rcu_note_context_switch()
304 t->rcu_read_unlock_special.b.blocked = true; in rcu_note_context_switch()
305 t->rcu_blocked_node = rnp; in rcu_note_context_switch()
312 WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0); in rcu_note_context_switch()
313 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); in rcu_note_context_switch()
315 t->pid, in rcu_note_context_switch()
316 (rnp->qsmask & rdp->grpmask) in rcu_note_context_switch()
317 ? rnp->gp_seq in rcu_note_context_switch()
318 : rcu_seq_snap(&rnp->gp_seq)); in rcu_note_context_switch()
325 * Either we were not in an RCU read-side critical section to in rcu_note_context_switch()
328 * for this CPU. Again, if we were in an RCU read-side critical in rcu_note_context_switch()
334 if (rdp->exp_deferred_qs) in rcu_note_context_switch()
344 * answer, it must hold the rcu_node's ->lock.
348 return READ_ONCE(rnp->gp_tasks) != NULL; in rcu_preempt_blocked_readers_cgp()
351 /* limit value for ->rcu_read_lock_nesting. */
356 current->rcu_read_lock_nesting++; in rcu_preempt_read_enter()
361 return --current->rcu_read_lock_nesting; in rcu_preempt_read_exit()
366 current->rcu_read_lock_nesting = val; in rcu_preempt_depth_set()
371 * Just increment ->rcu_read_lock_nesting, shared state will be updated
380 WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true); in __rcu_read_lock()
387 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
388 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
390 * in an RCU read-side critical section and other special cases.
398 if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s))) in __rcu_read_unlock()
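Only a single line of __rcu_read_unlock() is matched above. For context, here is a minimal sketch of how the nesting counter and the special-state check fit together, written against the helpers listed earlier (rcu_preempt_read_exit(), rcu_read_unlock_special()); this is a paraphrase for illustration, not the verbatim kernel function:

	void __rcu_read_unlock_sketch(void)
	{
		struct task_struct *t = current;

		if (rcu_preempt_read_exit() == 0) {
			barrier();  /* Order critical-section accesses before the special.s load. */
			if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
				rcu_read_unlock_special(t);  /* Report deferred QS, unboost, etc. */
		}
	}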
410 * Advance a ->blkd_tasks-list pointer to the next entry, instead
418 np = t->rcu_node_entry.next; in rcu_next_node_entry()
419 if (np == &rnp->blkd_tasks) in rcu_next_node_entry()
426 * preempted within an RCU read-side critical section.
430 return !list_empty(&rnp->blkd_tasks); in rcu_preempt_has_tasks()
453 * t->rcu_read_unlock_special cannot change. in rcu_preempt_deferred_qs_irqrestore()
455 special = t->rcu_read_unlock_special; in rcu_preempt_deferred_qs_irqrestore()
457 if (!special.s && !rdp->exp_deferred_qs) { in rcu_preempt_deferred_qs_irqrestore()
461 t->rcu_read_unlock_special.s = 0; in rcu_preempt_deferred_qs_irqrestore()
475 * blocked-tasks list below. in rcu_preempt_deferred_qs_irqrestore()
477 if (rdp->exp_deferred_qs) in rcu_preempt_deferred_qs_irqrestore()
480 /* Clean up if blocked during RCU read-side critical section. */ in rcu_preempt_deferred_qs_irqrestore()
489 rnp = t->rcu_blocked_node; in rcu_preempt_deferred_qs_irqrestore()
491 WARN_ON_ONCE(rnp != t->rcu_blocked_node); in rcu_preempt_deferred_qs_irqrestore()
494 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq && in rcu_preempt_deferred_qs_irqrestore()
495 (!empty_norm || rnp->qsmask)); in rcu_preempt_deferred_qs_irqrestore()
497 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ in rcu_preempt_deferred_qs_irqrestore()
499 list_del_init(&t->rcu_node_entry); in rcu_preempt_deferred_qs_irqrestore()
500 t->rcu_blocked_node = NULL; in rcu_preempt_deferred_qs_irqrestore()
502 rnp->gp_seq, t->pid); in rcu_preempt_deferred_qs_irqrestore()
503 if (&t->rcu_node_entry == rnp->gp_tasks) in rcu_preempt_deferred_qs_irqrestore()
504 WRITE_ONCE(rnp->gp_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
505 if (&t->rcu_node_entry == rnp->exp_tasks) in rcu_preempt_deferred_qs_irqrestore()
506 WRITE_ONCE(rnp->exp_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
508 /* Snapshot ->boost_mtx ownership w/rnp->lock held. */ in rcu_preempt_deferred_qs_irqrestore()
509 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t; in rcu_preempt_deferred_qs_irqrestore()
510 if (&t->rcu_node_entry == rnp->boost_tasks) in rcu_preempt_deferred_qs_irqrestore()
511 WRITE_ONCE(rnp->boost_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
517 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, in rcu_preempt_deferred_qs_irqrestore()
523 rnp->gp_seq, in rcu_preempt_deferred_qs_irqrestore()
524 0, rnp->qsmask, in rcu_preempt_deferred_qs_irqrestore()
525 rnp->level, in rcu_preempt_deferred_qs_irqrestore()
526 rnp->grplo, in rcu_preempt_deferred_qs_irqrestore()
527 rnp->grphi, in rcu_preempt_deferred_qs_irqrestore()
528 !!rnp->gp_tasks); in rcu_preempt_deferred_qs_irqrestore()
536 rt_mutex_futex_unlock(&rnp->boost_mtx); in rcu_preempt_deferred_qs_irqrestore()
550 * Is a deferred quiescent-state pending, and are we also not in
551 * an RCU read-side critical section? It is the caller's responsibility
561 READ_ONCE(t->rcu_read_unlock_special.s)) && in rcu_preempt_need_deferred_qs()
568 * not being in an RCU read-side critical section. The caller must
583 * Minimal handler to give the scheduler a chance to re-evaluate.
590 rdp->defer_qs_iw_pending = false; in rcu_preempt_deferred_qs_handler()
596 * read-side critical section.
614 struct rcu_node *rnp = rdp->mynode; in rcu_read_unlock_special()
616 exp = (t->rcu_blocked_node && in rcu_read_unlock_special()
617 READ_ONCE(t->rcu_blocked_node->exp_tasks)) || in rcu_read_unlock_special()
618 (rdp->grpmask & READ_ONCE(rnp->expmask)); in rcu_read_unlock_special()
631 !rdp->defer_qs_iw_pending && exp) { in rcu_read_unlock_special()
632 // Get scheduler to re-evaluate and call hooks. in rcu_read_unlock_special()
634 init_irq_work(&rdp->defer_qs_iw, in rcu_read_unlock_special()
636 rdp->defer_qs_iw_pending = true; in rcu_read_unlock_special()
637 irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu); in rcu_read_unlock_special()
650 * invoked -before- updating this rnp's ->gp_seq.
653 * block the newly created grace period, so set up ->gp_tasks accordingly.
664 (rnp->qsmaskinit || rnp->wait_blkd_tasks)) { in rcu_preempt_check_blocked_tasks()
665 WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next); in rcu_preempt_check_blocked_tasks()
666 t = container_of(rnp->gp_tasks, struct task_struct, in rcu_preempt_check_blocked_tasks()
668 trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"), in rcu_preempt_check_blocked_tasks()
669 rnp->gp_seq, t->pid); in rcu_preempt_check_blocked_tasks()
671 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
707 !t->rcu_read_unlock_special.b.need_qs && in rcu_flavor_sched_clock_irq()
709 t->rcu_read_unlock_special.b.need_qs = true; in rcu_flavor_sched_clock_irq()
713 * Check for a task exiting while in a preemptible-RCU read-side
724 if (unlikely(!list_empty(&current->rcu_node_entry))) { in exit_rcu()
727 WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true); in exit_rcu()
738 * Dump the blocked-tasks state, but limit the list dump to the
752 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n", in dump_blkd_tasks()
753 __func__, rnp->grplo, rnp->grphi, rnp->level, in dump_blkd_tasks()
754 (long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs); in dump_blkd_tasks()
755 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) in dump_blkd_tasks()
756 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n", in dump_blkd_tasks()
757 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext); in dump_blkd_tasks()
758 pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n", in dump_blkd_tasks()
759 __func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks), in dump_blkd_tasks()
760 READ_ONCE(rnp->exp_tasks)); in dump_blkd_tasks()
761 pr_info("%s: ->blkd_tasks", __func__); in dump_blkd_tasks()
763 list_for_each(lhp, &rnp->blkd_tasks) { in dump_blkd_tasks()
769 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) { in dump_blkd_tasks()
771 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); in dump_blkd_tasks()
774 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, in dump_blkd_tasks()
775 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); in dump_blkd_tasks()
830 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
831 * dyntick-idle quiescent state visible to other CPUs, which will in
911 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
915 * Check to see if this CPU is in a non-context-switch quiescent state,
929 * references only CPU-local variables that other CPUs in rcu_flavor_sched_clock_irq()
940 * while in preemptible RCU read-side critical sections.
947 * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
952 WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks)); in dump_blkd_tasks()
973 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
974 * or ->boost_tasks, advancing the pointer to the next task in the
975 * ->blkd_tasks list.
986 if (READ_ONCE(rnp->exp_tasks) == NULL && in rcu_boost()
987 READ_ONCE(rnp->boost_tasks) == NULL) in rcu_boost()
988 return 0; /* Nothing left to boost. */ in rcu_boost()
994 * might exit their RCU read-side critical sections on their own. in rcu_boost()
996 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { in rcu_boost()
1002 * Preferentially boost tasks blocking expedited grace periods. in rcu_boost()
1004 * expedited grace period must boost all blocked tasks, including in rcu_boost()
1005 * those blocking the pre-existing normal grace period. in rcu_boost()
1007 if (rnp->exp_tasks != NULL) in rcu_boost()
1008 tb = rnp->exp_tasks; in rcu_boost()
1010 tb = rnp->boost_tasks; in rcu_boost()
1013 * We boost task t by manufacturing an rt_mutex that appears to in rcu_boost()
1016 * exits its outermost RCU read-side critical section. Then in rcu_boost()
1017 * simply acquiring this artificial rt_mutex will boost task in rcu_boost()
1020 * Note that task t must acquire rnp->lock to remove itself from in rcu_boost()
1021 * the ->blkd_tasks list, which it will do from exit() if from in rcu_boost()
1023 * stay around at least until we drop rnp->lock. Note that in rcu_boost()
1024 * rnp->lock also resolves races between our priority boosting in rcu_boost()
1025 * and task t's exiting its outermost RCU read-side critical in rcu_boost()
1029 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); in rcu_boost()
1032 rt_mutex_lock(&rnp->boost_mtx); in rcu_boost()
1033 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ in rcu_boost()
1035 return READ_ONCE(rnp->exp_tasks) != NULL || in rcu_boost()
1036 READ_ONCE(rnp->boost_tasks) != NULL; in rcu_boost()
1040 * Priority-boosting kthread, one per leaf rcu_node.
1048 trace_rcu_utilization(TPS("Start boost kthread@init")); in rcu_boost_kthread()
1050 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING); in rcu_boost_kthread()
1051 trace_rcu_utilization(TPS("End boost kthread@rcu_wait")); in rcu_boost_kthread()
1052 rcu_wait(READ_ONCE(rnp->boost_tasks) || in rcu_boost_kthread()
1053 READ_ONCE(rnp->exp_tasks)); in rcu_boost_kthread()
1054 trace_rcu_utilization(TPS("Start boost kthread@rcu_wait")); in rcu_boost_kthread()
1055 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING); in rcu_boost_kthread()
1062 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING); in rcu_boost_kthread()
1063 trace_rcu_utilization(TPS("End boost kthread@rcu_yield")); in rcu_boost_kthread()
1065 trace_rcu_utilization(TPS("Start boost kthread@rcu_yield")); in rcu_boost_kthread()
1070 trace_rcu_utilization(TPS("End boost kthread@notreached")); in rcu_boost_kthread()
1076 * blocking the current grace period, and, if so, tell the per-rcu_node
1078 * period in progress, it is always time to boost.
1080 * The caller must hold rnp->lock, which this function releases.
1081 * The ->boost_kthread_task is immortal, so we don't need to worry
1085 __releases(rnp->lock) in rcu_initiate_boost()
1088 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) { in rcu_initiate_boost()
1092 if (rnp->exp_tasks != NULL || in rcu_initiate_boost()
1093 (rnp->gp_tasks != NULL && in rcu_initiate_boost()
1094 rnp->boost_tasks == NULL && in rcu_initiate_boost()
1095 rnp->qsmask == 0 && in rcu_initiate_boost()
1096 (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld))) { in rcu_initiate_boost()
1097 if (rnp->exp_tasks == NULL) in rcu_initiate_boost()
1098 WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks); in rcu_initiate_boost()
1100 rcu_wake_cond(rnp->boost_kthread_task, in rcu_initiate_boost()
1101 READ_ONCE(rnp->boost_kthread_status)); in rcu_initiate_boost()
1108 * Is the current CPU running the RCU-callbacks kthread?
1119 * Do priority-boost accounting for the start of a new grace period.
1123 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; in rcu_preempt_boost_start_gp()
1127 * Create an RCU-boost kthread for the specified node if one does not
1133 int rnp_index = rnp - rcu_get_root(); in rcu_spawn_one_boost_kthread()
1144 rcu_state.boost = 1; in rcu_spawn_one_boost_kthread()
1146 if (rnp->boost_kthread_task != NULL) in rcu_spawn_one_boost_kthread()
1155 rnp->boost_kthread_task = t; in rcu_spawn_one_boost_kthread()
1163 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1165 * held, so the value of rnp->qsmaskinit will be stable.
1167 * We don't include outgoingcpu in the affinity set, use -1 if there is
1173 struct task_struct *t = rnp->boost_kthread_task; in rcu_boost_kthread_setaffinity()
1193 * Spawn boost kthreads -- called as soon as the scheduler is running.
1206 struct rcu_node *rnp = rdp->mynode; in rcu_prepare_kthreads()
1216 __releases(rnp->lock) in rcu_initiate_boost()
1247 * Check to see if any future non-offloaded RCU-related work will need
1250 * it is -not- an exported member of the RCU API.
1258 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) && in rcu_needs_cpu()
1259 !rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist); in rcu_needs_cpu()
1271 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1283 * the energy-efficient dyntick-idle mode.
1288 * to sleep in dyntick-idle mode with RCU callbacks pending. This
1289 * is sized to be roughly one RCU grace period. Those energy-efficiency
1292 * system. And if you are -that- concerned about energy efficiency,
1316 if (jiffies == rdp->last_advance_all) in rcu_try_advance_all_cbs()
1318 rdp->last_advance_all = jiffies; in rcu_try_advance_all_cbs()
1320 rnp = rdp->mynode; in rcu_try_advance_all_cbs()
1327 if ((rcu_seq_completed_gp(rdp->gp_seq, in rcu_try_advance_all_cbs()
1328 rcu_seq_current(&rnp->gp_seq)) || in rcu_try_advance_all_cbs()
1329 unlikely(READ_ONCE(rdp->gpwrap))) && in rcu_try_advance_all_cbs()
1330 rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_try_advance_all_cbs()
1333 if (rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_try_advance_all_cbs()
1339 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
1352 /* If no non-offloaded callbacks, RCU doesn't need the CPU. */ in rcu_needs_cpu()
1353 if (rcu_segcblist_empty(&rdp->cblist) || in rcu_needs_cpu()
1354 rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist)) { in rcu_needs_cpu()
1365 rdp->last_accelerate = jiffies; in rcu_needs_cpu()
1368 dj = round_up(rcu_idle_gp_delay + jiffies, rcu_idle_gp_delay) - jiffies; in rcu_needs_cpu()
1377 * major task is to accelerate (that is, assign grace-period numbers to) any
1390 if (rcu_segcblist_is_offloaded(&rdp->cblist)) in rcu_prepare_for_idle()
1395 if (tne != rdp->tick_nohz_enabled_snap) { in rcu_prepare_for_idle()
1396 if (!rcu_segcblist_empty(&rdp->cblist)) in rcu_prepare_for_idle()
1398 rdp->tick_nohz_enabled_snap = tne; in rcu_prepare_for_idle()
1408 if (rdp->last_accelerate == jiffies) in rcu_prepare_for_idle()
1410 rdp->last_accelerate = jiffies; in rcu_prepare_for_idle()
1411 if (rcu_segcblist_pend_cbs(&rdp->cblist)) { in rcu_prepare_for_idle()
1412 rnp = rdp->mynode; in rcu_prepare_for_idle()
1431 if (rcu_segcblist_is_offloaded(&rdp->cblist)) in rcu_cleanup_after_idle()
1442 * Offload callback processing from the boot-time-specified set of CPUs
1448 * invoke callbacks. Each GP kthread invokes its own CBs. The no-CBs CPUs
1455 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
1456 * running CPU-bound user-mode computations.
1458 * Offloading of callbacks can also be used as an energy-efficiency
1460 * about entering dyntick-idle mode.
1465 * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
1467 * comma-separated list of CPUs and/or CPU ranges. If an invalid list is
1492 * Don't bother bypassing ->cblist if the call_rcu() rate is low.
1494 * on ->nocb_lock, which only can happen at high call_rcu() rates.
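One plausible form of the rate limit described above is a per-jiffy cap on direct ->cblist enqueues, exposed as a read-only module parameter. The nocb_nobypass_lim_per_jiffy name matches its uses later in this listing; the default value below is an assumption for illustration:

	static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;  /* Roughly 16k call_rcu()s per second before bypassing. */
	module_param(nocb_nobypass_lim_per_jiffy, int, 0444);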
1500 * Acquire the specified rcu_data structure's ->nocb_bypass_lock. If the
1501 * lock isn't immediately available, increment ->nocb_lock_contended to
1505 __acquires(&rdp->nocb_bypass_lock) in rcu_nocb_bypass_lock()
1508 if (raw_spin_trylock(&rdp->nocb_bypass_lock)) in rcu_nocb_bypass_lock()
1510 atomic_inc(&rdp->nocb_lock_contended); in rcu_nocb_bypass_lock()
1511 WARN_ON_ONCE(smp_processor_id() != rdp->cpu); in rcu_nocb_bypass_lock()
1513 raw_spin_lock(&rdp->nocb_bypass_lock); in rcu_nocb_bypass_lock()
1515 atomic_dec(&rdp->nocb_lock_contended); in rcu_nocb_bypass_lock()
1519 * Spinwait until the specified rcu_data structure's ->nocb_lock is
1520 * not contended. Please note that this is extremely special-purpose,
1523 * grace-period-duration time intervals between successive acquisitions
1530 WARN_ON_ONCE(smp_processor_id() != rdp->cpu); in rcu_nocb_wait_contended()
1531 while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended))) in rcu_nocb_wait_contended()
1537 * ->nocb_bypass_lock.
1542 return raw_spin_trylock(&rdp->nocb_bypass_lock); in rcu_nocb_bypass_trylock()
1546 * Release the specified rcu_data structure's ->nocb_bypass_lock.
1549 __releases(&rdp->nocb_bypass_lock) in rcu_nocb_bypass_unlock()
1552 raw_spin_unlock(&rdp->nocb_bypass_lock); in rcu_nocb_bypass_unlock()
1556 * Acquire the specified rcu_data structure's ->nocb_lock, but only
1557 * if it corresponds to a no-CBs CPU.
1562 if (!rcu_segcblist_is_offloaded(&rdp->cblist)) in rcu_nocb_lock()
1564 raw_spin_lock(&rdp->nocb_lock); in rcu_nocb_lock()
1568 * Release the specified rcu_data structure's ->nocb_lock, but only
1569 * if it corresponds to a no-CBs CPU.
1573 if (rcu_segcblist_is_offloaded(&rdp->cblist)) { in rcu_nocb_unlock()
1575 raw_spin_unlock(&rdp->nocb_lock); in rcu_nocb_unlock()
1580 * Release the specified rcu_data structure's ->nocb_lock and restore
1581 * interrupts, but only if it corresponds to a no-CBs CPU.
1586 if (rcu_segcblist_is_offloaded(&rdp->cblist)) { in rcu_nocb_unlock_irqrestore()
1588 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); in rcu_nocb_unlock_irqrestore()
1594 /* Lockdep check that ->cblist may be safely accessed. */
1598 if (rcu_segcblist_is_offloaded(&rdp->cblist)) in rcu_lockdep_assert_cblist_protected()
1599 lockdep_assert_held(&rdp->nocb_lock); in rcu_lockdep_assert_cblist_protected()
1603 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
1613 return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1]; in rcu_nocb_gp_get()
1618 init_swait_queue_head(&rnp->nocb_gp_wq[0]); in rcu_init_one_nocb()
1619 init_swait_queue_head(&rnp->nocb_gp_wq[1]); in rcu_init_one_nocb()
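The two-element ->nocb_gp_wq[] array initialized above is indexed by the low-order bit of the grace-period sequence number, as rcu_nocb_gp_get() shows just before it. A hedged sketch of the grace-period-end side, which wakes any no-CBs GP kthreads sleeping on the just-completed element (the function name here is illustrative):

	static void rcu_nocb_gp_cleanup_sketch(struct swait_queue_head *sq)
	{
		swake_up_all(sq);  /* Wake no-CBs GP kthreads waiting on the just-ended GP. */
	}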
1622 /* Is the specified CPU a no-CBs CPU? */
1631 * Kick the GP kthread for this NOCB group. Caller holds ->nocb_lock
1636 __releases(rdp->nocb_lock) in wake_nocb_gp()
1639 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; in wake_nocb_gp()
1641 lockdep_assert_held(&rdp->nocb_lock); in wake_nocb_gp()
1642 if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) { in wake_nocb_gp()
1643 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in wake_nocb_gp()
1648 del_timer(&rdp->nocb_timer); in wake_nocb_gp()
1650 raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); in wake_nocb_gp()
1651 if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) { in wake_nocb_gp()
1652 WRITE_ONCE(rdp_gp->nocb_gp_sleep, false); in wake_nocb_gp()
1654 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake")); in wake_nocb_gp()
1656 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); in wake_nocb_gp()
1658 wake_up_process(rdp_gp->nocb_gp_kthread); in wake_nocb_gp()
1668 if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) in wake_nocb_gp_defer()
1669 mod_timer(&rdp->nocb_timer, jiffies + 1); in wake_nocb_gp_defer()
1670 if (rdp->nocb_defer_wakeup < waketype) in wake_nocb_gp_defer()
1671 WRITE_ONCE(rdp->nocb_defer_wakeup, waketype); in wake_nocb_gp_defer()
1672 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason); in wake_nocb_gp_defer()
1676 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
1677 * However, if there is a callback to be enqueued and if ->nocb_bypass
1678 * proves to be initially empty, just return false because the no-CB GP
1688 WARN_ON_ONCE(!rcu_segcblist_is_offloaded(&rdp->cblist)); in rcu_nocb_do_flush_bypass()
1690 lockdep_assert_held(&rdp->nocb_bypass_lock); in rcu_nocb_do_flush_bypass()
1691 if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) { in rcu_nocb_do_flush_bypass()
1692 raw_spin_unlock(&rdp->nocb_bypass_lock); in rcu_nocb_do_flush_bypass()
1695 /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */ in rcu_nocb_do_flush_bypass()
1697 rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */ in rcu_nocb_do_flush_bypass()
1698 rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp); in rcu_nocb_do_flush_bypass()
1699 rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl); in rcu_nocb_do_flush_bypass()
1700 WRITE_ONCE(rdp->nocb_bypass_first, j); in rcu_nocb_do_flush_bypass()
1706 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
1707 * However, if there is a callback to be enqueued and if ->nocb_bypass
1708 * proves to be initially empty, just return false because the no-CB GP
1716 if (!rcu_segcblist_is_offloaded(&rdp->cblist)) in rcu_nocb_flush_bypass()
1724 * If the ->nocb_bypass_lock is immediately available, flush the
1725 * ->nocb_bypass queue into ->cblist.
1730 if (!rcu_segcblist_is_offloaded(&rdp->cblist) || in rcu_nocb_try_flush_bypass()
1737 * See whether it is appropriate to use the ->nocb_bypass list in order
1738 * to control contention on ->nocb_lock. A limited number of direct
1739 * enqueues are permitted into ->cblist per jiffy. If ->nocb_bypass
1740 * is non-empty, further callbacks must be placed into ->nocb_bypass,
1742 * back to direct use of ->cblist. However, ->nocb_bypass should not be
1743 * used if ->cblist is empty, because otherwise callbacks can be stranded
1744 * on ->nocb_bypass because we cannot count on the current CPU ever again
1745 * invoking call_rcu(). The general rule is that if ->nocb_bypass is
1746 * non-empty, the corresponding no-CBs grace-period kthread must not be
1749 * Finally, it is not permitted to use the bypass during early boot,
1750 * as doing so would confuse the auto-initialization code. Besides
1760 long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); in rcu_nocb_try_bypass()
1762 if (!rcu_segcblist_is_offloaded(&rdp->cblist)) { in rcu_nocb_try_bypass()
1763 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_nocb_try_bypass()
1768 // Don't use ->nocb_bypass during early boot. in rcu_nocb_try_bypass()
1771 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); in rcu_nocb_try_bypass()
1772 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_nocb_try_bypass()
1777 // moving back from ->nocb_bypass to ->cblist. in rcu_nocb_try_bypass()
1778 if (j == rdp->nocb_nobypass_last) { in rcu_nocb_try_bypass()
1779 c = rdp->nocb_nobypass_count + 1; in rcu_nocb_try_bypass()
1781 WRITE_ONCE(rdp->nocb_nobypass_last, j); in rcu_nocb_try_bypass()
1782 c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy; in rcu_nocb_try_bypass()
1783 if (ULONG_CMP_LT(rdp->nocb_nobypass_count, in rcu_nocb_try_bypass()
1789 WRITE_ONCE(rdp->nocb_nobypass_count, c); in rcu_nocb_try_bypass()
1791 // If there hasn't yet been all that many ->cblist enqueues in rcu_nocb_try_bypass()
1792 // this jiffy, tell the caller to enqueue onto ->cblist. But flush in rcu_nocb_try_bypass()
1793 // ->nocb_bypass first. in rcu_nocb_try_bypass()
1794 if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) { in rcu_nocb_try_bypass()
1796 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_nocb_try_bypass()
1798 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in rcu_nocb_try_bypass()
1801 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); in rcu_nocb_try_bypass()
1805 // If ->nocb_bypass has been used too long or is too full, in rcu_nocb_try_bypass()
1806 // flush ->nocb_bypass to ->cblist. in rcu_nocb_try_bypass()
1807 if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) || in rcu_nocb_try_bypass()
1811 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_nocb_try_bypass()
1813 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in rcu_nocb_try_bypass()
1815 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); in rcu_nocb_try_bypass()
1818 if (j != rdp->nocb_gp_adv_time && in rcu_nocb_try_bypass()
1819 rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && in rcu_nocb_try_bypass()
1820 rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) { in rcu_nocb_try_bypass()
1821 rcu_advance_cbs_nowake(rdp->mynode, rdp); in rcu_nocb_try_bypass()
1822 rdp->nocb_gp_adv_time = j; in rcu_nocb_try_bypass()
1828 // We need to use the bypass. in rcu_nocb_try_bypass()
1831 ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); in rcu_nocb_try_bypass()
1832 rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */ in rcu_nocb_try_bypass()
1833 rcu_cblist_enqueue(&rdp->nocb_bypass, rhp); in rcu_nocb_try_bypass()
1835 WRITE_ONCE(rdp->nocb_bypass_first, j); in rcu_nocb_try_bypass()
1836 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ")); in rcu_nocb_try_bypass()
1843 // No-CBs GP kthread might be indefinitely asleep, if so, wake. in rcu_nocb_try_bypass()
1845 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) { in rcu_nocb_try_bypass()
1846 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in rcu_nocb_try_bypass()
1850 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in rcu_nocb_try_bypass()
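As a sketch of how a caller consults rcu_nocb_try_bypass() under the rules laid out in its header comment (the signature and the surrounding call_rcu() handling are assumptions for illustration):

	static void call_rcu_enqueue_sketch(struct rcu_data *rdp, struct rcu_head *rhp,
					    unsigned long flags)
	{
		bool was_alldone;

		if (rcu_nocb_try_bypass(rdp, rhp, &was_alldone, flags))
			return;  /* Queued on ->nocb_bypass; the GP kthread will pick it up. */
		/* Otherwise the caller enqueues directly onto ->cblist and may wake the GP kthread. */
	}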
1859 * Awaken the no-CBs grace-period kthread if needed, either due to it
1866 __releases(rdp->nocb_lock) in __call_rcu_nocb_wake()
1874 t = READ_ONCE(rdp->nocb_gp_kthread); in __call_rcu_nocb_wake()
1876 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in __call_rcu_nocb_wake()
1882 len = rcu_segcblist_n_cbs(&rdp->cblist); in __call_rcu_nocb_wake()
1884 rdp->qlen_last_fqs_check = len; in __call_rcu_nocb_wake()
1888 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in __call_rcu_nocb_wake()
1895 } else if (len > rdp->qlen_last_fqs_check + qhimark) { in __call_rcu_nocb_wake()
1897 rdp->qlen_last_fqs_check = len; in __call_rcu_nocb_wake()
1899 if (j != rdp->nocb_gp_adv_time && in __call_rcu_nocb_wake()
1900 rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && in __call_rcu_nocb_wake()
1901 rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) { in __call_rcu_nocb_wake()
1902 rcu_advance_cbs_nowake(rdp->mynode, rdp); in __call_rcu_nocb_wake()
1903 rdp->nocb_gp_adv_time = j; in __call_rcu_nocb_wake()
1906 if ((rdp->nocb_cb_sleep || in __call_rcu_nocb_wake()
1907 !rcu_segcblist_ready_cbs(&rdp->cblist)) && in __call_rcu_nocb_wake()
1908 !timer_pending(&rdp->nocb_bypass_timer)) in __call_rcu_nocb_wake()
1913 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); in __call_rcu_nocb_wake()
1919 /* Wake up the no-CBs GP kthread to flush ->nocb_bypass. */
1925 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer")); in do_nocb_bypass_wakeup_timer()
1932 * No-CBs GP kthreads come here to wait for additional callbacks to show up
1937 bool bypass = false; in nocb_gp_wait() local
1939 int __maybe_unused cpu = my_rdp->cpu; in nocb_gp_wait()
1955 * and the global grace-period kthread are awakened if needed. in nocb_gp_wait()
1957 WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp); in nocb_gp_wait()
1958 for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) { in nocb_gp_wait()
1959 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check")); in nocb_gp_wait()
1961 bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); in nocb_gp_wait()
1963 (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) || in nocb_gp_wait()
1965 // Bypass full or old, so flush it. in nocb_gp_wait()
1967 bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); in nocb_gp_wait()
1968 } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) { in nocb_gp_wait()
1973 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in nocb_gp_wait()
1974 TPS("Bypass")); in nocb_gp_wait()
1975 bypass = true; in nocb_gp_wait()
1977 rnp = rdp->mynode; in nocb_gp_wait()
1978 if (bypass) { // Avoid race with first bypass CB. in nocb_gp_wait()
1979 WRITE_ONCE(my_rdp->nocb_defer_wakeup, in nocb_gp_wait()
1981 del_timer(&my_rdp->nocb_timer); in nocb_gp_wait()
1985 if (!rcu_segcblist_restempty(&rdp->cblist, in nocb_gp_wait()
1987 (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && in nocb_gp_wait()
1988 rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) { in nocb_gp_wait()
1991 wasempty = rcu_segcblist_restempty(&rdp->cblist, in nocb_gp_wait()
1997 !rcu_segcblist_restempty(&rdp->cblist, in nocb_gp_wait()
1999 if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) { in nocb_gp_wait()
2004 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in nocb_gp_wait()
2007 if (rcu_segcblist_ready_cbs(&rdp->cblist)) { in nocb_gp_wait()
2008 needwake = rdp->nocb_cb_sleep; in nocb_gp_wait()
2009 WRITE_ONCE(rdp->nocb_cb_sleep, false); in nocb_gp_wait()
2010 smp_mb(); /* CB invocation -after- GP end. */ in nocb_gp_wait()
2016 swake_up_one(&rdp->nocb_cb_wq); in nocb_gp_wait()
2023 my_rdp->nocb_gp_bypass = bypass; in nocb_gp_wait()
2024 my_rdp->nocb_gp_gp = needwait_gp; in nocb_gp_wait()
2025 my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0; in nocb_gp_wait()
2026 if (bypass && !rcu_nocb_poll) { in nocb_gp_wait()
2027 // At least one child with non-empty ->nocb_bypass, so set in nocb_gp_wait()
2029 raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
2030 mod_timer(&my_rdp->nocb_bypass_timer, j + 2); in nocb_gp_wait()
2031 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
2041 swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq, in nocb_gp_wait()
2042 !READ_ONCE(my_rdp->nocb_gp_sleep)); in nocb_gp_wait()
2045 rnp = my_rdp->mynode; in nocb_gp_wait()
2048 rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1], in nocb_gp_wait()
2049 rcu_seq_done(&rnp->gp_seq, wait_gp_seq) || in nocb_gp_wait()
2050 !READ_ONCE(my_rdp->nocb_gp_sleep)); in nocb_gp_wait()
2054 raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
2055 if (bypass) in nocb_gp_wait()
2056 del_timer(&my_rdp->nocb_bypass_timer); in nocb_gp_wait()
2057 WRITE_ONCE(my_rdp->nocb_gp_sleep, true); in nocb_gp_wait()
2058 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
2060 my_rdp->nocb_gp_seq = -1; in nocb_gp_wait()
2065 * No-CBs grace-period-wait kthread. There is one of these per group
2070 * that then have callback-invocation work to do.
2077 WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1); in rcu_nocb_gp_kthread()
2085 * Invoke any ready callbacks from the corresponding no-CBs CPU,
2093 struct rcu_node *rnp = rdp->mynode; in nocb_cb_wait()
2103 if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && in nocb_cb_wait()
2104 rcu_seq_done(&rnp->gp_seq, cur_gp_seq) && in nocb_cb_wait()
2106 needwake_gp = rcu_advance_cbs(rdp->mynode, rdp); in nocb_cb_wait()
2109 if (rcu_segcblist_ready_cbs(&rdp->cblist)) { in nocb_cb_wait()
2116 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep")); in nocb_cb_wait()
2117 WRITE_ONCE(rdp->nocb_cb_sleep, true); in nocb_cb_wait()
2121 swait_event_interruptible_exclusive(rdp->nocb_cb_wq, in nocb_cb_wait()
2122 !READ_ONCE(rdp->nocb_cb_sleep)); in nocb_cb_wait()
2123 if (!smp_load_acquire(&rdp->nocb_cb_sleep)) { /* VVV */ in nocb_cb_wait()
2128 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty")); in nocb_cb_wait()
2132 * Per-rcu_data kthread, but only for no-CBs CPUs. Repeatedly invoke
2151 return READ_ONCE(rdp->nocb_defer_wakeup); in rcu_nocb_need_deferred_wakeup()
2165 ndw = READ_ONCE(rdp->nocb_defer_wakeup); in do_nocb_deferred_wakeup_common()
2166 WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); in do_nocb_deferred_wakeup_common()
2168 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake")); in do_nocb_deferred_wakeup_common()
2181 * This means we do an inexact common-case check. Note that if
2182 * we miss, ->nocb_timer will eventually clean things up.
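A hedged sketch of the inexact common-case check the comment above describes, combining the two helpers listed just before it (presented as an illustration of the wrapper, not verified source):

	static void do_nocb_deferred_wakeup_sketch(struct rcu_data *rdp)
	{
		if (rcu_nocb_need_deferred_wakeup(rdp))   /* Lock-free peek at ->nocb_defer_wakeup. */
			do_nocb_deferred_wakeup_common(rdp);  /* Slow path performs the actual wakeup. */
	}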
2226 pr_info("\tPoll for callbacks from no-CBs CPUs.\n"); in rcu_init_nohz()
2230 if (rcu_segcblist_empty(&rdp->cblist)) in rcu_init_nohz()
2231 rcu_segcblist_init(&rdp->cblist); in rcu_init_nohz()
2232 rcu_segcblist_offload(&rdp->cblist); in rcu_init_nohz()
2237 /* Initialize per-rcu_data variables for no-CBs CPUs. */
2240 init_swait_queue_head(&rdp->nocb_cb_wq); in rcu_boot_init_nocb_percpu_data()
2241 init_swait_queue_head(&rdp->nocb_gp_wq); in rcu_boot_init_nocb_percpu_data()
2242 raw_spin_lock_init(&rdp->nocb_lock); in rcu_boot_init_nocb_percpu_data()
2243 raw_spin_lock_init(&rdp->nocb_bypass_lock); in rcu_boot_init_nocb_percpu_data()
2244 raw_spin_lock_init(&rdp->nocb_gp_lock); in rcu_boot_init_nocb_percpu_data()
2245 timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0); in rcu_boot_init_nocb_percpu_data()
2246 timer_setup(&rdp->nocb_bypass_timer, do_nocb_bypass_wakeup_timer, 0); in rcu_boot_init_nocb_percpu_data()
2247 rcu_cblist_init(&rdp->nocb_bypass); in rcu_boot_init_nocb_percpu_data()
2251 * If the specified CPU is a no-CBs CPU that does not already have its
2262 * If this isn't a no-CBs CPU or if it already has an rcuo kthread, in rcu_spawn_one_nocb_kthread()
2265 if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread) in rcu_spawn_one_nocb_kthread()
2269 rdp_gp = rdp->nocb_gp_rdp; in rcu_spawn_one_nocb_kthread()
2270 if (!rdp_gp->nocb_gp_kthread) { in rcu_spawn_one_nocb_kthread()
2272 "rcuog/%d", rdp_gp->cpu); in rcu_spawn_one_nocb_kthread()
2275 WRITE_ONCE(rdp_gp->nocb_gp_kthread, t); in rcu_spawn_one_nocb_kthread()
2283 WRITE_ONCE(rdp->nocb_cb_kthread, t); in rcu_spawn_one_nocb_kthread()
2284 WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread); in rcu_spawn_one_nocb_kthread()
2288 * If the specified CPU is a no-CBs CPU that does not already have its
2299 * no-CBs CPUs. This assumes that the early_initcall()s happen before
2300 * non-boot CPUs come online -- if this changes, we will need to add
2311 /* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */
2312 static int rcu_nocb_gp_stride = -1;
2316 * Initialize GP-CB relationships for all no-CBs CPU.
2332 if (ls == -1) { in rcu_organize_nocb_kthreads()
2344 if (rdp->cpu >= nl) { in rcu_organize_nocb_kthreads()
2347 nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls; in rcu_organize_nocb_kthreads()
2348 rdp->nocb_gp_rdp = rdp; in rcu_organize_nocb_kthreads()
2356 pr_alert("%s: No-CB GP kthread CPU %d:", in rcu_organize_nocb_kthreads()
2362 rdp->nocb_gp_rdp = rdp_gp; in rcu_organize_nocb_kthreads()
2363 rdp_prev->nocb_next_cb_rdp = rdp; in rcu_organize_nocb_kthreads()
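The body of the ls == -1 branch above is not part of this listing; consistent with the "sqrt(nr_cpu_ids)" default mentioned in the comment, it plausibly resolves the stride as follows (a guess for illustration, using the kernel's int_sqrt() helper):

	if (ls == -1) {
		ls = int_sqrt(nr_cpu_ids);  /* Roughly sqrt(nr_cpu_ids) CB CPUs per GP kthread. */
		rcu_nocb_gp_stride = ls;    /* Remember the resolved stride. */
	}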
2380 WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask)); in rcu_bind_current_to_nocb()
2385 * Dump out nocb grace-period kthread state for the specified rcu_data
2390 struct rcu_node *rnp = rdp->mynode; in show_rcu_nocb_gp_state()
2393 rdp->cpu, in show_rcu_nocb_gp_state()
2394 "kK"[!!rdp->nocb_gp_kthread], in show_rcu_nocb_gp_state()
2395 "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)], in show_rcu_nocb_gp_state()
2396 "dD"[!!rdp->nocb_defer_wakeup], in show_rcu_nocb_gp_state()
2397 "tT"[timer_pending(&rdp->nocb_timer)], in show_rcu_nocb_gp_state()
2398 "bB"[timer_pending(&rdp->nocb_bypass_timer)], in show_rcu_nocb_gp_state()
2399 "sS"[!!rdp->nocb_gp_sleep], in show_rcu_nocb_gp_state()
2400 ".W"[swait_active(&rdp->nocb_gp_wq)], in show_rcu_nocb_gp_state()
2401 ".W"[swait_active(&rnp->nocb_gp_wq[0])], in show_rcu_nocb_gp_state()
2402 ".W"[swait_active(&rnp->nocb_gp_wq[1])], in show_rcu_nocb_gp_state()
2403 ".B"[!!rdp->nocb_gp_bypass], in show_rcu_nocb_gp_state()
2404 ".G"[!!rdp->nocb_gp_gp], in show_rcu_nocb_gp_state()
2405 (long)rdp->nocb_gp_seq, in show_rcu_nocb_gp_state()
2406 rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops)); in show_rcu_nocb_gp_state()
2412 struct rcu_segcblist *rsclp = &rdp->cblist; in show_rcu_nocb_state()
2417 if (rdp->nocb_gp_rdp == rdp) in show_rcu_nocb_state()
2420 pr_info(" CB %d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%c%c%c q%ld\n", in show_rcu_nocb_state()
2421 rdp->cpu, rdp->nocb_gp_rdp->cpu, in show_rcu_nocb_state()
2422 "kK"[!!rdp->nocb_cb_kthread], in show_rcu_nocb_state()
2423 "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)], in show_rcu_nocb_state()
2424 "cC"[!!atomic_read(&rdp->nocb_lock_contended)], in show_rcu_nocb_state()
2425 "lL"[raw_spin_is_locked(&rdp->nocb_lock)], in show_rcu_nocb_state()
2426 "sS"[!!rdp->nocb_cb_sleep], in show_rcu_nocb_state()
2427 ".W"[swait_active(&rdp->nocb_cb_wq)], in show_rcu_nocb_state()
2428 jiffies - rdp->nocb_bypass_first, in show_rcu_nocb_state()
2429 jiffies - rdp->nocb_nobypass_last, in show_rcu_nocb_state()
2430 rdp->nocb_nobypass_count, in show_rcu_nocb_state()
2435 ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)], in show_rcu_nocb_state()
2436 rcu_segcblist_n_cbs(&rdp->cblist)); in show_rcu_nocb_state()
2439 if (rdp->nocb_gp_rdp == rdp) in show_rcu_nocb_state()
2442 waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock); in show_rcu_nocb_state()
2443 wastimer = timer_pending(&rdp->nocb_bypass_timer); in show_rcu_nocb_state()
2444 wassleep = swait_active(&rdp->nocb_gp_wq); in show_rcu_nocb_state()
2445 if (!rdp->nocb_gp_sleep && !waslocked && !wastimer && !wassleep) in show_rcu_nocb_state()
2448 pr_info(" nocb GP activity on CB-only CPU!!! %c%c%c%c %c\n", in show_rcu_nocb_state()
2450 "dD"[!!rdp->nocb_defer_wakeup], in show_rcu_nocb_state()
2452 "sS"[!!rdp->nocb_gp_sleep], in show_rcu_nocb_state()
2458 /* No ->nocb_lock to acquire. */
2463 /* No ->nocb_lock to release. */
2468 /* No ->nocb_lock to release. */
2475 /* Lockdep check that ->cblist may be safely accessed. */
2541 * grace-period kthread will do force_quiescent_state() processing?
2560 * Bind the RCU grace-period kthreads to the housekeeping CPU.
2569 /* Record the current task on dyntick-idle entry. */
2573 WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id()); in rcu_dynticks_task_enter()
2577 /* Record no current task on dyntick-idle exit. */
2581 WRITE_ONCE(current->rcu_tasks_idle_cpu, -1); in rcu_dynticks_task_exit()
2590 current->trc_reader_special.b.need_mb = true; in rcu_dynticks_task_trace_enter()
2599 current->trc_reader_special.b.need_mb = false; in rcu_dynticks_task_trace_exit()