Searched refs:rcu_state (Results 1 – 7 of 7) sorted by relevance

/Linux-v5.4/kernel/rcu/

tree_stall.h
  in rcu_cpu_stall_reset():
     92    WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
  in record_gp_stall_check_time():
    105    rcu_state.gp_start = j;
    108    smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
    109    rcu_state.jiffies_resched = j + j1 / 2;
    110    rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
  in rcu_stall_kick_kthreads():
    131    j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
    132    if (time_after(jiffies, j) && rcu_state.gp_kthread &&
    133        (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
    135        rcu_state.name);
    137        wake_up_process(rcu_state.gp_kthread);
  [all …]

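The timing fields in these matches all rely on the kernel's wraparound-safe jiffies arithmetic: time_after() compares two counters via signed subtraction, so deadlines stay correct across counter wrap, and writing jiffies + ULONG_MAX / 2 (line 92 above) parks the deadline half a wrap away, effectively disabling stall checks. A minimal user-space sketch of that pattern, where struct stall_state and its field are hypothetical stand-ins and only the time_after() arithmetic mirrors the kernel's:

#include <stdio.h>

typedef unsigned long jiffies_t;

/* Same trick as the kernel's time_after(): signed subtraction keeps the
 * comparison correct even after the counter has wrapped around. */
static int time_after(jiffies_t a, jiffies_t b)
{
        return (long)(b - a) < 0;
}

struct stall_state {
        jiffies_t jiffies_stall;        /* deadline for the next stall warning */
};

int main(void)
{
        struct stall_state st;
        jiffies_t jiffies = (jiffies_t)-10;     /* counter about to wrap */

        st.jiffies_stall = jiffies + 21;        /* deadline lands past the wrap */

        for (int i = 0; i < 40; i++, jiffies++) {
                if (time_after(jiffies, st.jiffies_stall)) {
                        printf("stall check fires at tick %d\n", i);
                        break;
                }
        }
        return 0;
}
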
tree.c
  definition of the rcu_state variable:
     87    struct rcu_state rcu_state = {
     88        .level = { &rcu_state.node[0] },
     91        .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
     94        .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
     95        .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
     96        .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
  in rcu_gp_in_progress():
    203    return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
  in rcu_get_gp_seq():
    506    return READ_ONCE(rcu_state.gp_seq);
  in rcu_exp_batches_completed():
    518    return rcu_state.expedited_sequence;
  in rcu_get_root():
    527    return &rcu_state.node[0];
  [all …]

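Lines 87–96 show a C idiom worth pausing on: a file-scope object whose initializer takes the address of its own members, which is legal because such addresses are address constants (the mutex and spinlock initializers likewise pass the lock's own name, which lockdep uses to label the lock class). A compilable sketch of the idiom, with illustrative struct and field names:

#include <stdio.h>

struct node { int dummy; };

struct state {
        struct node node[4];    /* flat array holding the whole tree */
        struct node *level[2];  /* per-level pointers into node[] */
};

/* Legal at file scope: &state_obj.node[0] is an address constant, so the
 * object may be initialized with pointers into itself, just as rcu_state's
 * .level member is in tree.c above. */
struct state state_obj = {
        .level = { &state_obj.node[0] },
};

int main(void)
{
        printf("level[0] points at node[0]: %d\n",
               state_obj.level[0] == &state_obj.node[0]);
        return 0;
}
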
tree_exp.h
  in rcu_exp_gp_seq_start():
     20    rcu_seq_start(&rcu_state.expedited_sequence);
  in rcu_exp_gp_seq_endval():
     29    return rcu_seq_endval(&rcu_state.expedited_sequence);
  in rcu_exp_gp_seq_end():
     37    rcu_seq_end(&rcu_state.expedited_sequence);
  in rcu_exp_gp_seq_snap():
     49    s = rcu_seq_snap(&rcu_state.expedited_sequence);
     50    trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
  in rcu_exp_gp_seq_done():
     61    return rcu_seq_done(&rcu_state.expedited_sequence, s);
  in sync_exp_reset_tree_hotplug():
     77    int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
     82    if (likely(ncpus == rcu_state.ncpus_snap))
     84    rcu_state.ncpus_snap = ncpus;
  in __rcu_report_exp_rnp():
    205    swake_up_one(&rcu_state.expedited_wq);
  [all …]

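These are the expedited grace-period sequence helpers: rcu_seq_start() and rcu_seq_end() bracket a grace period by manipulating low-order state bits of expedited_sequence, rcu_seq_snap() computes the counter value at which a full new grace period will have completed, and rcu_seq_done() tests whether the counter has reached that snapshot. A simplified, runnable analogue of the protocol, using a single in-progress bit rather than the kernel's two state bits and memory barriers:

#include <assert.h>
#include <stdio.h>

static unsigned long seq;       /* stand-in for rcu_state.expedited_sequence */

static void seq_start(void) { seq |= 1UL; }              /* mark GP in progress */
static void seq_end(void)   { seq = (seq | 1UL) + 1UL; } /* mark GP complete */

/* Earliest counter value at which a grace period starting after this call
 * is guaranteed to have ended (an already-running GP does not count). */
static unsigned long seq_snap(void)
{
        return ((seq + 1UL) | 1UL) + 1UL;
}

/* Has the grace period requested by seq_snap() completed?  Signed
 * subtraction keeps the comparison valid across counter wrap. */
static int seq_done(unsigned long s)
{
        return (long)(seq - s) >= 0;
}

int main(void)
{
        unsigned long s = seq_snap();   /* request a full grace period */

        assert(!seq_done(s));
        seq_start();                    /* grace period begins ... */
        assert(!seq_done(s));
        seq_end();                      /* ... and ends */
        printf("grace period done: %d\n", seq_done(s));
        return 0;
}
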
tree_plugin.h
  in rcu_note_context_switch():
    310    trace_rcu_preempt_task(rcu_state.name,
  in rcu_flavor_sched_clock_irq():
    709    time_after(jiffies, rcu_state.gp_start + HZ))
  in rcu_spawn_one_boost_kthread():
   1126    rcu_state.boost = 1;
  in wake_nocb_gp():
   1650    trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
   1661    trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
  in wake_nocb_gp_defer():
   1679    trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
  in rcu_nocb_try_bypass():
   1805    trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
   1820    trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
   1843    trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
   1853    trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
  [all …]

rcu.h
    320    #define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])
    326    #define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])
    337        srcu_for_each_node_breadth_first(&rcu_state, rnp)
    347        (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

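These macros lean on the fact that the rcu_node combining tree is stored in one flat array in breadth-first order: rcu_state.level[] records where each level begins, so a full breadth-first walk is a plain linear scan of node[], and the leaves are the tail of the array starting at level[rcu_num_lvls - 1]. A self-contained sketch of that layout, using an illustrative one-root, two-leaf tree and hypothetical names:

#include <stdio.h>

struct tnode { int id; };

#define NUM_LVLS  2
#define NUM_NODES 3

/* Breadth-first layout: node[0] is the root, node[1..2] are the leaves. */
static struct tnode node[NUM_NODES] = { {0}, {1}, {2} };
static struct tnode *level[NUM_LVLS] = { &node[0], &node[1] };

#define first_leaf_node()       (level[NUM_LVLS - 1])
#define is_last_leaf_node(np)   ((np) == &node[NUM_NODES - 1])

/* A full BFS is just a linear scan of the flat array. */
#define for_each_node_breadth_first(np) \
        for ((np) = &node[0]; (np) < &node[NUM_NODES]; (np)++)

/* The leaves are the array's tail, so the same scan works from level[last]. */
#define for_each_leaf_node(np) \
        for ((np) = first_leaf_node(); (np) < &node[NUM_NODES]; (np)++)

int main(void)
{
        struct tnode *np;

        for_each_node_breadth_first(np)
                printf("bfs visits node %d\n", np->id);
        for_each_leaf_node(np)
                printf("leaf %d%s\n", np->id,
                       is_last_leaf_node(np) ? " (last)" : "");
        return 0;
}
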
tree.h
    290    struct rcu_state {    (struct definition)

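For orientation, the fields this search touches can be gathered into a partial picture of the structure defined at tree.h:290. This is a sketch assembled from the matches above: only the member names come from the hits, while the types, ordering, and everything omitted are assumptions.

struct rcu_node;                        /* combining-tree node */
struct task_struct;

struct rcu_state_sketch {
        struct rcu_node *node;          /* flat combining tree; node[0] is the root */
        struct rcu_node **level;        /* first node of each tree level */
        unsigned long gp_seq;           /* normal grace-period sequence number */
        unsigned long gp_start;         /* jiffies when the current GP started */
        unsigned long gp_flags;         /* communication with the GP kthread */
        struct task_struct *gp_kthread; /* task that drives grace periods */
        unsigned long expedited_sequence; /* expedited GP sequence number */
        int ncpus, ncpus_snap;          /* CPUs seen, and a snapshot of that count */
        unsigned long jiffies_stall;    /* deadline for the next stall warning */
        unsigned long jiffies_resched;  /* when to start forcing reschedules */
        unsigned long n_force_qs;       /* count of forced quiescent-state passes */
        const char *name;               /* flavor name used in trace output */
        int boost;                      /* whether priority boosting is enabled */
};
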
/Linux-v5.4/Documentation/RCU/

stallwarn.txt
    185    possible for an rcu_state stall to be caused by both CPUs -and- tasks,