Home
last modified time | relevance | path

Searched refs:synchronize_sched (Results 1 – 25 of 36) sorted by relevance

12

/Linux-v4.19/include/linux/
rcutiny.h:64  synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ in synchronize_rcu_expedited()
74  synchronize_sched(); in synchronize_rcu_bh()
79  synchronize_sched(); in synchronize_rcu_bh_expedited()
84  synchronize_sched(); in synchronize_sched_expedited()
rcupdate.h:60  void synchronize_sched(void);
93  synchronize_sched(); in synchronize_rcu()
183  #define synchronize_rcu_tasks synchronize_sched
tracepoint.h:85  synchronize_sched(); in tracepoint_synchronize_unregister()
/Linux-v4.19/kernel/sched/
membarrier.c:213  synchronize_sched(); in membarrier_register_global_expedited()
249  synchronize_sched(); in membarrier_register_private_expedited()
301  synchronize_sched(); in SYSCALL_DEFINE2()
cpufreq_schedutil.c:842  synchronize_sched(); in sugov_stop()
/Linux-v4.19/Documentation/RCU/
NMI-RCU.txt:84  One way to accomplish this is via synchronize_sched(), perhaps as
88  synchronize_sched();
91  This works because synchronize_sched() blocks until all CPUs complete
93  Since NMI handlers disable preemption, synchronize_sched() is guaranteed
95  to free up the handler's data as soon as synchronize_sched() returns.
UP.txt:89  It -is- safe for synchronize_sched() and synchronize_rcu_bh() to return
checklist.txt:192  synchronize_rcu_bh(), synchronize_sched(), synchronize_srcu(),
220  updater uses call_rcu_sched() or synchronize_sched(), then
342  need to use synchronize_irq() or synchronize_sched().
whatisRCU.txt:339  c. synchronize_sched() rcu_read_lock_sched() / rcu_read_unlock_sched()
879  rcu_read_lock_sched synchronize_sched rcu_barrier_sched
/Linux-v4.19/kernel/rcu/
tiny.c:181  void synchronize_sched(void) in synchronize_sched() function
188  EXPORT_SYMBOL_GPL(synchronize_sched);
update.c:207  synchronize_sched(); in rcu_test_sync_prims()
716  synchronize_sched(); in rcu_tasks_kthread()
806  synchronize_sched(); in rcu_tasks_kthread()
sync.c:47  .sync = synchronize_sched,
rcuperf.c:333  .sync = synchronize_sched,
tree.c:3140  void synchronize_sched(void) in synchronize_sched() function
3153  EXPORT_SYMBOL_GPL(synchronize_sched);
3258  synchronize_sched(); in cond_synchronize_sched()
rcutorture.c:694  .sync = synchronize_sched,
/Linux-v4.19/arch/sparc/oprofile/
init.c:56  synchronize_sched(); /* Allow already-started NMIs to complete. */ in timer_stop()
/Linux-v4.19/tools/include/linux/
kernel.h:118  #define synchronize_sched() macro
/Linux-v4.19/kernel/
kprobes.c:232  synchronize_sched(); in collect_garbage_slots()
1369  synchronize_sched(); in register_aggr_kprobe()
1584  synchronize_sched(); in register_kprobe()
1764  synchronize_sched(); in unregister_kprobes()
1954  synchronize_sched(); in unregister_kretprobes()
module.c:2162  synchronize_sched(); in free_module()
3520  synchronize_sched(); in do_init_module()
3813  synchronize_sched(); in load_module()
3828  synchronize_sched(); in load_module()
/Linux-v4.19/drivers/cpufreq/
cpufreq_governor.c:349  synchronize_sched(); in gov_clear_update_util()
/Linux-v4.19/Documentation/
kprobes.txt:246  rather, it calls synchronize_sched() for safety first, because it's
248  optimized region [3]_. As you know, synchronize_sched() can ensure
249  that all interruptions that were active when synchronize_sched()
/Linux-v4.19/fs/
file.c:161  synchronize_sched(); in expand_fdtable()
/Linux-v4.19/kernel/trace/
ring_buffer.c:1837  synchronize_sched(); in ring_buffer_resize()
4194  synchronize_sched(); in ring_buffer_read_prepare_sync()
4366  synchronize_sched(); in ring_buffer_reset_cpu()
ftrace.c:4499  synchronize_sched(); in unregister_ftrace_function_probe_func()
5317  synchronize_sched(); in ftrace_graph_release()
6436  synchronize_sched(); in clear_ftrace_pids()
6583  synchronize_sched(); in ftrace_pid_write()
trace.c:1684  synchronize_sched(); in tracing_reset()
1701  synchronize_sched(); in tracing_reset_online_cpus()
2253  synchronize_sched(); in trace_buffered_event_disable()
5409  synchronize_sched(); in tracing_set_tracer()

12