Lines Matching +full:wait +full:- +full:state

1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Task-based RCU implementations.
23 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
26 * @cbs_wq: Wait queue allowing a new callback to get the kthread's attention.
28 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
29 * @gp_func: This flavor's grace-period-wait function.
30 * @gp_state: Grace period's most recent state transition (debugging).
31 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
34 * @gp_start: Most recent grace-period start in jiffies.
37 * @n_ipis_fails: Number of IPI-send failures.
38 * @pregp_func: This flavor's pre-grace-period function (optional).
39 * @pertask_func: This flavor's per-task scan function (optional).
40 * @postscan_func: This flavor's post-task scan function (optional).
41 * @holdouts_func: This flavor's holdout-list scan function (optional).
42 * @postgp_func: This flavor's post-grace-period function (optional).
43 * @call_func: This flavor's call_rcu()-equivalent function.
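Each flavor instantiates exactly one of these structures and wires in its grace-period-wait and call_rcu()-equivalent functions. A minimal sketch of such an instantiation, assuming standard kernel initializer macros and using field names taken from the kerneldoc and code excerpts above (the instance name is illustrative):

static struct rcu_tasks rcu_tasks_example = {
	.cbs_tail	= &rcu_tasks_example.cbs_head,
	.cbs_wq		= __WAIT_QUEUE_HEAD_INITIALIZER(rcu_tasks_example.cbs_wq),
	.cbs_lock	= __RAW_SPIN_LOCK_UNLOCKED(rcu_tasks_example.cbs_lock),
	.gp_func	= rcu_tasks_wait_gp,	/* grace-period-wait function */
	.call_func	= call_rcu_tasks,	/* call_rcu()-equivalent function */
	.kname		= "rcu_tasks_example",
};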
97 /* RCU tasks grace-period state for debugging. */
131 /* Record grace-period phase and time. */
134 rtp->gp_state = newstate; in set_tasks_gp_state()
135 rtp->gp_jiffies = jiffies; in set_tasks_gp_state()
139 /* Return state name. */
142 int i = data_race(rtp->gp_state); // Let KCSAN detect update races in tasks_gp_state_getname()
158 rhp->next = NULL; in call_rcu_tasks_generic()
159 rhp->func = func; in call_rcu_tasks_generic()
160 raw_spin_lock_irqsave(&rtp->cbs_lock, flags); in call_rcu_tasks_generic()
161 needwake = !rtp->cbs_head; in call_rcu_tasks_generic()
162 WRITE_ONCE(*rtp->cbs_tail, rhp); in call_rcu_tasks_generic()
163 rtp->cbs_tail = &rhp->next; in call_rcu_tasks_generic()
164 raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags); in call_rcu_tasks_generic()
166 if (needwake && READ_ONCE(rtp->kthread_ptr)) in call_rcu_tasks_generic()
167 wake_up(&rtp->cbs_wq); in call_rcu_tasks_generic()
170 // Wait for a grace period for the specified flavor of Tasks RCU.
177 /* Wait for the grace period. */ in synchronize_rcu_tasks_generic()
178 wait_rcu_gp(rtp->call_func); in synchronize_rcu_tasks_generic()
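Each flavor's public entry points are thin wrappers around these generic helpers; a sketch, assuming the vanilla flavor's instance is named rcu_tasks:

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}

void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}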
181 /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
191 WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start! in rcu_tasks_kthread()
196 * one RCU-tasks grace period and then invokes the callbacks. in rcu_tasks_kthread()
197 * This loop is terminated by the system going down. ;-) in rcu_tasks_kthread()
202 raw_spin_lock_irqsave(&rtp->cbs_lock, flags); in rcu_tasks_kthread()
204 list = rtp->cbs_head; in rcu_tasks_kthread()
205 rtp->cbs_head = NULL; in rcu_tasks_kthread()
206 rtp->cbs_tail = &rtp->cbs_head; in rcu_tasks_kthread()
207 raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags); in rcu_tasks_kthread()
209 /* If there were none, wait a bit and start over. */ in rcu_tasks_kthread()
211 wait_event_interruptible(rtp->cbs_wq, in rcu_tasks_kthread()
212 READ_ONCE(rtp->cbs_head)); in rcu_tasks_kthread()
213 if (!rtp->cbs_head) { in rcu_tasks_kthread()
221 // Wait for one grace period. in rcu_tasks_kthread()
223 rtp->gp_start = jiffies; in rcu_tasks_kthread()
224 rtp->gp_func(rtp); in rcu_tasks_kthread()
225 rtp->n_gps++; in rcu_tasks_kthread()
230 next = list->next; in rcu_tasks_kthread()
232 list->func(list); in rcu_tasks_kthread()
238 schedule_timeout_idle(rtp->gp_sleep); in rcu_tasks_kthread()
244 /* Spawn RCU-tasks grace-period kthread. */
249 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); in rcu_spawn_tasks_kthread_generic()
250 …NCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __fu… in rcu_spawn_tasks_kthread_generic()
258 * Print any non-default Tasks RCU settings.
264 …pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_s… in rcu_tasks_bootup_oddness()
280 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
284 rtp->kname, in show_rcu_tasks_generic_gp_kthread()
285 tasks_gp_state_getname(rtp), data_race(rtp->gp_state), in show_rcu_tasks_generic_gp_kthread()
286 jiffies - data_race(rtp->gp_jiffies), in show_rcu_tasks_generic_gp_kthread()
287 data_race(rtp->n_gps), in show_rcu_tasks_generic_gp_kthread()
288 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), in show_rcu_tasks_generic_gp_kthread()
289 ".k"[!!data_race(rtp->kthread_ptr)], in show_rcu_tasks_generic_gp_kthread()
290 ".C"[!!data_race(rtp->cbs_head)], in show_rcu_tasks_generic_gp_kthread()
301 // Shared code between task-list-scanning variants of Tasks RCU.
303 /* Wait for one RCU-tasks grace period. */
312 rtp->pregp_func(); in rcu_tasks_wait_gp()
315 * There were callbacks, so we need to wait for an RCU-tasks in rcu_tasks_wait_gp()
323 rtp->pertask_func(t, &holdouts); in rcu_tasks_wait_gp()
327 rtp->postscan_func(&holdouts); in rcu_tasks_wait_gp()
336 // Start off with initial wait and slowly back off to 1 HZ wait. in rcu_tasks_wait_gp()
337 fract = rtp->init_fract; in rcu_tasks_wait_gp()
358 rtp->holdouts_func(&holdouts, needreport, &firstreport); in rcu_tasks_wait_gp()
362 rtp->postgp_func(rtp); in rcu_tasks_wait_gp()
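Pulling these fragments together, the overall shape of rcu_tasks_wait_gp() is roughly as follows (a sketch only; stall reporting and the exact back-off arithmetic are elided):

static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	LIST_HEAD(holdouts);
	struct task_struct *g, *t;
	bool firstreport = true;

	rtp->pregp_func();				/* Flavor-specific preparation. */

	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);	/* Collect possible holdouts. */
	rcu_read_unlock();

	rtp->postscan_func(&holdouts);			/* E.g., wait for exiting tasks. */

	while (!list_empty(&holdouts)) {
		/* Sleep between holdout scans, starting at HZ/init_fract. */
		schedule_timeout_idle(HZ / rtp->init_fract);
		rtp->holdouts_func(&holdouts, true, &firstreport);
	}

	rtp->postgp_func(rtp);				/* Flavor-specific ordering/cleanup. */
}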
372 // switch, cond_resched_rcu_qs(), user-space execution, and idle.
374 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
376 // state for some of the manipulations involved in tracing and the like.
378 // rates from multiple CPUs. If this is required, per-CPU callback lists
387 // Invokes synchronize_rcu() in order to wait for all in-flight
388 // t->on_rq and t->nvcsw transitions to complete. This works because
390 // rcu_tasks_pertask(), invoked on every non-idle task:
391 // For every runnable non-idle task other than the current one, use
401 // Scans the holdout list, attempting to identify a quiescent state
402 // for each task on the list. If there is a quiescent state, the
406 // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
413 // read-side critical sections waited for by rcu_tasks_postscan().
415 // Pre-grace-period update-side code is ordered before the grace via the
416 // ->cbs_lock and the smp_mb__after_spinlock(). Pre-grace-period read-side
421 /* Pre-grace-period preparation. */
425 * Wait for all pre-existing t->on_rq and t->nvcsw transitions in rcu_tasks_pregp_step()
428 * synchronize_rcu(), a read-side critical section that started in rcu_tasks_pregp_step()
433 * memory barrier on the first store to t->rcu_tasks_holdout, in rcu_tasks_pregp_step()
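Consistent with the comment fragments above, the pre-grace-period step for this flavor amounts to a single full RCU grace period; a sketch:

static void rcu_tasks_pregp_step(void)
{
	/* Wait for all pre-existing t->on_rq and t->nvcsw transitions. */
	synchronize_rcu();
}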
440 /* Per-task initial processing. */
443 if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) { in rcu_tasks_pertask()
445 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); in rcu_tasks_pertask()
446 WRITE_ONCE(t->rcu_tasks_holdout, true); in rcu_tasks_pertask()
447 list_add(&t->rcu_tasks_holdout_list, hop); in rcu_tasks_pertask()
455 * Wait for tasks that are in the process of exiting. This in rcu_tasks_postscan()
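The wait referred to here uses the tasks_rcu_exit_srcu domain that exit_tasks_rcu_start()/exit_tasks_rcu_finish() (matched further below) enter and leave; a likely sketch of the body:

static void rcu_tasks_postscan(struct list_head *hop)
{
	/* Wait for tasks that are in the process of exiting. */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}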
470 if (!READ_ONCE(t->rcu_tasks_holdout) || in check_holdout_task()
471 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || in check_holdout_task()
472 !READ_ONCE(t->on_rq) || in check_holdout_task()
474 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) { in check_holdout_task()
475 WRITE_ONCE(t->rcu_tasks_holdout, false); in check_holdout_task()
476 list_del_init(&t->rcu_tasks_holdout_list); in check_holdout_task()
491 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout, in check_holdout_task()
492 t->rcu_tasks_idle_cpu, cpu); in check_holdout_task()
508 /* Finish off the Tasks-RCU grace period. */
512 * Because ->on_rq and ->nvcsw are not guaranteed to have a full in rcu_tasks_postgp()
514 * reordering on other CPUs could cause their RCU-tasks read-side in rcu_tasks_postgp()
516 * However, because these ->nvcsw updates are carried out with in rcu_tasks_postgp()
520 * This synchronize_rcu() also confines all ->rcu_tasks_holdout in rcu_tasks_postgp()
522 * memory barriers for ->rcu_tasks_holdout accesses. in rcu_tasks_postgp()
535 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
541 * read-side critical sections have completed. call_rcu_tasks() assumes
542 * that the read-side critical sections end at a voluntary context
544 * or transition to usermode execution. As such, there are no read-side
547 * through a safe state, not so much for data-structure synchronization.
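A hypothetical usage sketch (struct old_probe, free_old_probe(), and retire_old_probe() are invented names for illustration): queue a callback that frees an old object only after every task has passed through a voluntary context switch:

struct old_probe {
	struct rcu_head rh;
	void *trampoline;
};

static void free_old_probe(struct rcu_head *rhp)
{
	struct old_probe *op = container_of(rhp, struct old_probe, rh);

	kfree(op->trampoline);
	kfree(op);
}

static void retire_old_probe(struct old_probe *op)
{
	/* op has already been unlinked, so no new task can enter its trampoline. */
	call_rcu_tasks(&op->rh, free_old_probe);
}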
559 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
561 * Control will return to the caller some time after a full rcu-tasks
563 * executing rcu-tasks read-side critical sections have elapsed. These
564 * read-side critical sections are delimited by calls to schedule(),
583 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
585 * Although the current implementation is guaranteed to wait, it is not
590 /* There is only one callback queue, so this is easy. ;-) */ in rcu_barrier_tasks()
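Because every callback goes through that single queue, the barrier reduces to one synchronous grace period; a sketch consistent with the comment above:

void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy. ;-) */
	synchronize_rcu_tasks();
}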
620 current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu); in exit_tasks_rcu_start()
630 __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx); in exit_tasks_rcu_finish()
654 // Ordering is provided by the scheduler's context-switch code.
661 // Wait for one rude RCU-tasks grace period.
664 rtp->n_ipis += cpumask_weight(cpu_online_mask); in rcu_tasks_rude_wait_gp()
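The rude flavor's grace-period wait forces a context switch on every online CPU, which is a quiescent state for its preemption-disabled readers; a sketch using the workqueue helper (the exact mainline body may differ slightly):

static void rcu_tasks_be_rude(struct work_struct *work)
{
	/* An empty work item: merely running it forces a context switch. */
}

static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}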
673 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
679 * read-side critical sections have completed. call_rcu_tasks_rude()
680 * assumes that the read-side critical sections end at context switch,
682 * there are no read-side primitives analogous to rcu_read_lock() and
684 * that all tasks have passed through a safe state, not so much for
685 * data-structure synchronization.
697 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
699 * Control will return to the caller some time after a rude rcu-tasks
701 * executing rcu-tasks read-side critical sections have elapsed. These
702 * read-side critical sections are delimited by calls to schedule(),
721 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
723 * Although the current implementation is guaranteed to wait, it is not
728 /* There is only one callback queue, so this is easy. ;-) */ in rcu_barrier_tasks_rude()
755 // 1. Has explicit read-side markers to allow finite grace periods
756 // in the face of in-kernel loops for PREEMPT=n builds.
759 // CPU-hotplug code paths, similar to the capabilities of SRCU.
761 // 3. Avoids expensive read-side instruction, having overhead similar
764 // There are of course downsides. The grace-period code can send IPIs to
781 // Initialize the count of readers and block CPU-hotplug operations.
782 // rcu_tasks_trace_pertask(), invoked on every non-idle task:
783 // Initialize per-task state and attempt to identify an immediate
784 // quiescent state for that task, or, failing that, attempt to
786 // rcu_read_unlock_trace() will report the quiescent state (in which
790 // in order to avoid ordering overhead on common-case shared-variable
793 // Initialize state and attempt to identify an immediate quiescent
794 // state as above (but only for idle tasks), unblock CPU-hotplug
795 // operations, and wait for an RCU grace period to avoid races with
798 // Scans the holdout list, attempting to identify a quiescent state
799 // for each task on the list. If there is a quiescent state, the
802 // Wait for the count of readers to drop to zero, reporting any stalls.
808 // Pre-grace-period update-side code is ordered before the grace
809 // period via the ->cbs_lock and barriers in rcu_tasks_kthread().
810 // Pre-grace-period read-side code is ordered before the grace period by
812 // scheduler context-switch ordering (for locked-down non-running readers).
814 // The lockdep state must be outside of #ifdef to be useful.
824 static atomic_t trc_n_readers_need_end; // Number of waited-for readers.
830 // The number of detections of task quiescent state relying on
850 /* If we are the last reader, wake up the grace-period kthread. */
853 int nq = READ_ONCE(t->trc_reader_special.b.need_qs); in rcu_read_unlock_trace_special()
856 t->trc_reader_special.b.need_mb) in rcu_read_unlock_trace_special()
857 smp_mb(); // Pairs with update-side barriers. in rcu_read_unlock_trace_special()
858 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers. in rcu_read_unlock_trace_special()
860 WRITE_ONCE(t->trc_reader_special.b.need_qs, false); in rcu_read_unlock_trace_special()
861 WRITE_ONCE(t->trc_reader_nesting, nesting); in rcu_read_unlock_trace_special()
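For context, this helper is invoked from the rcu_read_unlock_trace() fast path in include/linux/rcupdate_trace.h when the exiting reader has been flagged; a simplified sketch of that caller (the exact nesting encoding is elided):

static inline void rcu_read_unlock_trace(void)
{
	int nesting;
	struct task_struct *t = current;

	rcu_lock_release(&rcu_trace_lock_map);
	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
		WRITE_ONCE(t->trc_reader_nesting, nesting);
		return;  /* Common case: nothing special pending. */
	}
	rcu_read_unlock_trace_special(t, nesting);
}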
870 if (list_empty(&t->trc_holdout_list)) { in trc_add_holdout()
872 list_add(&t->trc_holdout_list, bhp); in trc_add_holdout()
879 if (!list_empty(&t->trc_holdout_list)) { in trc_del_holdout()
880 list_del_init(&t->trc_holdout_list); in trc_del_holdout()
885 /* IPI handler to check task state. */
898 // If the task is not in a read-side critical section, and in trc_read_check_handler()
899 // if this is the last reader, awaken the grace-period kthread. in trc_read_check_handler()
900 if (likely(!READ_ONCE(t->trc_reader_nesting))) { in trc_read_check_handler()
905 WRITE_ONCE(t->trc_reader_checked, true); in trc_read_check_handler()
909 if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) { in trc_read_check_handler()
914 WRITE_ONCE(t->trc_reader_checked, true); in trc_read_check_handler()
916 // Get here if the task is in a read-side critical section. Set in trc_read_check_handler()
917 // its state so that it will awaken the grace-period kthread upon in trc_read_check_handler()
919 WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)); in trc_read_check_handler()
920 WRITE_ONCE(t->trc_reader_special.b.need_qs, true); in trc_read_check_handler()
927 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^ in trc_read_check_handler()
930 /* Callback function for scheduler to check locked-down task. */
945 // we can inspect its state even though it is currently running. in trc_inspect_reader()
946 // However, we cannot safely change its state. in trc_inspect_reader()
949 !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting)) in trc_inspect_reader()
950 return false; // No quiescent state, do it the hard way. in trc_inspect_reader()
956 // The task is not running, so C-language access is safe. in trc_inspect_reader()
957 in_qs = likely(!t->trc_reader_nesting); in trc_inspect_reader()
960 // Mark as checked so that the grace-period kthread will in trc_inspect_reader()
962 t->trc_reader_checked = true; in trc_inspect_reader()
965 return true; // Already in quiescent state, done!!! in trc_inspect_reader()
967 // The task is in a read-side critical section, so set up its in trc_inspect_reader()
968 // state so that it will awaken the grace-period kthread upon exit in trc_inspect_reader()
970 atomic_inc(&trc_n_readers_need_end); // One more to wait on. in trc_inspect_reader()
971 WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)); in trc_inspect_reader()
972 WRITE_ONCE(t->trc_reader_special.b.need_qs, true); in trc_inspect_reader()
976 /* Attempt to extract the state for the specified task. */
983 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI in trc_wait_for_one_reader()
986 // The current task had better be in a quiescent state. in trc_wait_for_one_reader()
988 t->trc_reader_checked = true; in trc_wait_for_one_reader()
989 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); in trc_wait_for_one_reader()
1002 // an RCU read-side critical section. Otherwise, the invocation of in trc_wait_for_one_reader()
1015 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) in trc_wait_for_one_reader()
1020 t->trc_ipi_to_cpu = cpu; in trc_wait_for_one_reader()
1028 t->trc_ipi_to_cpu = cpu; in trc_wait_for_one_reader()
1037 /* Initialize for a new RCU-tasks-trace grace period. */
1042 // Allow for fast-acting IPIs. in rcu_tasks_trace_pregp_step()
1050 // This also waits for all readers in CPU-hotplug code paths. in rcu_tasks_trace_pregp_step()
1054 /* Do first-round processing for the specified task. */
1063 WRITE_ONCE(t->trc_reader_special.b.need_qs, false); in rcu_tasks_trace_pertask()
1064 WRITE_ONCE(t->trc_reader_checked, false); in rcu_tasks_trace_pertask()
1065 t->trc_ipi_to_cpu = -1; in rcu_tasks_trace_pertask()
1080 // Re-enable CPU hotplug now that the tasklist scan has completed. in rcu_tasks_trace_postscan()
1083 // Wait for late-stage exiting tasks to finish exiting. in rcu_tasks_trace_postscan()
1086 // Any tasks that exit after this point will set ->trc_reader_checked. in rcu_tasks_trace_postscan()
1089 /* Show the state of a task stalling the current RCU tasks trace GP. */
1101 t->pid, in show_stalled_task_trace()
1102 ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0], in show_stalled_task_trace()
1105 READ_ONCE(t->trc_reader_nesting), in show_stalled_task_trace()
1106 " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)], in show_stalled_task_trace()
1132 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && in check_all_holdout_tasks_trace()
1133 !READ_ONCE(t->trc_reader_checked)) in check_all_holdout_tasks_trace()
1137 if (READ_ONCE(t->trc_reader_checked)) in check_all_holdout_tasks_trace()
1143 // Re-enable CPU hotplug now that the holdout list scan has completed. in check_all_holdout_tasks_trace()
1153 /* Wait for grace period to complete and provide ordering. */
1166 // Wait for readers. in rcu_tasks_trace_postgp()
1178 if (READ_ONCE(t->trc_reader_special.b.need_qs)) in rcu_tasks_trace_postgp()
1183 if (READ_ONCE(t->trc_reader_special.b.need_qs)) in rcu_tasks_trace_postgp()
1196 /* Report any needed quiescent state for this exiting task. */
1199 WRITE_ONCE(t->trc_reader_checked, true); in exit_tasks_rcu_finish_trace()
1200 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); in exit_tasks_rcu_finish_trace()
1201 WRITE_ONCE(t->trc_reader_nesting, 0); in exit_tasks_rcu_finish_trace()
1202 if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs))) in exit_tasks_rcu_finish_trace()
1207 * call_rcu_tasks_trace() - Queue a callback for invocation after a trace task-based grace period
1213 * read-side critical sections have completed. call_rcu_tasks_trace()
1214 * assumes that the read-side critical sections end at context switch,
1216 * there are no read-side primitives analogous to rcu_read_lock() and
1218 * that all tasks have passed through a safe state, not so much for
1219 * data-structure synchronization.
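Tasks Trace RCU readers are delimited by rcu_read_lock_trace() and rcu_read_unlock_trace() (see item 1 of the design overview above). A hypothetical usage sketch: struct handler_table and the function names below are invented for illustration, while rcu_read_lock_trace(), rcu_read_unlock_trace(), and call_rcu_tasks_trace() are the real entry points:

#include <linux/rcupdate_trace.h>
#include <linux/slab.h>

struct handler_table {
	struct rcu_head rh;
	void (*handler)(void);
};

static struct handler_table *cur_table;

static void run_current_handler(void)
{
	struct handler_table *ht;

	rcu_read_lock_trace();
	ht = READ_ONCE(cur_table);
	if (ht)
		ht->handler();
	rcu_read_unlock_trace();
}

static void free_table(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct handler_table, rh));
}

static void install_table(struct handler_table *new_ht)
{
	struct handler_table *old = xchg(&cur_table, new_ht);

	if (old)
		call_rcu_tasks_trace(&old->rh, free_table);
}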
1231 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1233 * Control will return to the caller some time after a trace rcu-tasks
1235 * rcu-tasks read-side critical sections have elapsed. These read-side
1249 …ce_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section… in synchronize_rcu_tasks_trace()
1255 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1257 * Although the current implementation is guaranteed to wait, it is not
1262 /* There is only one callback queue, so this is easy. ;-) */ in rcu_barrier_tasks_trace()
1346 pr_info("Callback from %s invoked.\n", rttd->name); in test_rcu_tasks_callback()
1348 rttd->notrun = true; in test_rcu_tasks_callback()
1353 pr_info("Running RCU-tasks wait API self tests\n"); in rcu_tasks_initiate_self_tests()
1378 ret = -1; in rcu_tasks_verify_self_tests()
1406 // Run the self-tests. in rcu_init_tasks_generic()