
Searched full:barrier (Results 1 – 25 of 1249) sorted by relevance


/Linux-v6.1/tools/include/asm/
barrier.h
4 #include "../../arch/x86/include/asm/barrier.h"
6 #include "../../arch/arm/include/asm/barrier.h"
8 #include "../../arch/arm64/include/asm/barrier.h"
10 #include "../../arch/powerpc/include/asm/barrier.h"
12 #include "../../arch/s390/include/asm/barrier.h"
14 #include "../../arch/sh/include/asm/barrier.h"
16 #include "../../arch/sparc/include/asm/barrier.h"
18 #include "../../arch/tile/include/asm/barrier.h"
20 #include "../../arch/alpha/include/asm/barrier.h"
22 #include "../../arch/mips/include/asm/barrier.h"
[all …]
/Linux-v6.1/include/linux/
spinlock_up.h
9 #include <asm/barrier.h>
32 barrier(); in arch_spin_lock()
40 barrier(); in arch_spin_trylock()
47 barrier(); in arch_spin_unlock()
54 #define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
55 #define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
56 #define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
57 #define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
58 #define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
59 #define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
[all …]
compiler-intel.h
16 #define barrier() __memory_barrier() macro
17 #define barrier_data(ptr) barrier()
24 /* This should act as an optimization barrier on var.
25 * Given that this compiler does not have inline assembly, a compiler barrier
28 #define OPTIMIZER_HIDE_VAR(var) barrier()
preempt.h
204 barrier(); \
209 barrier(); \
220 barrier(); \
227 barrier(); \
241 barrier(); \
247 barrier(); \
257 barrier(); \
262 barrier(); \
274 #define preempt_disable() barrier()
275 #define sched_preempt_enable_no_resched() barrier()
[all …]
/Linux-v6.1/Documentation/
memory-barriers.txt
29 particular barrier, and
34 for any particular barrier, but if the architecture provides less than
37 Note also that it is possible that a barrier may be a no-op for an
38 architecture because the way that arch works renders an explicit barrier
53 - Varieties of memory barrier.
57 - SMP barrier pairing.
58 - Examples of memory barrier sequences.
64 - Compiler barrier.
74 (*) Inter-CPU acquiring barrier effects.
85 (*) Kernel I/O barrier effects.
[all …]
/Linux-v6.1/arch/sparc/include/asm/
barrier_64.h
6 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
10 * It used to be believed that the memory barrier had to be right in the
11 * delay slot, but a case has been traced recently wherein the memory barrier
23 * the memory barrier explicitly into a "branch always, predicted taken"
44 barrier(); \
52 barrier(); \
56 #define __smp_mb__before_atomic() barrier()
57 #define __smp_mb__after_atomic() barrier()
59 #include <asm-generic/barrier.h>
/Linux-v6.1/arch/mips/include/asm/
sync.h
11 * Two types of barrier are provided:
18 * restrictions imposed by the barrier.
31 * b) Multiple variants of ordering barrier are provided which allow the
34 * than a barrier are observed prior to stores that are younger than a
35 * barrier & don't care about the ordering of loads then the 'wmb'
36 * ordering barrier can be used. Limiting the barrier's effects to stores
49 * A full completion barrier; all memory accesses appearing prior to this sync
56 * For now we use a full completion barrier to implement all sync types, until
66 * barrier since 2010 & omit 'rmb' barriers because the CPUs don't perform
104 * don't implicitly provide a memory barrier. In general this is most MIPS
[all …]
barrier.h
86 # define __smp_mb() barrier()
87 # define __smp_rmb() barrier()
88 # define __smp_wmb() barrier()
92 * When LL/SC does imply order, it must also be a compiler barrier to avoid the
124 * a completion barrier immediately preceding the LL instruction. Therefore we
125 * can skip emitting a barrier from __smp_mb__before_atomic().
140 #include <asm-generic/barrier.h>
/Linux-v6.1/tools/virtio/ringtest/
main.h
90 /* Compiler barrier - similar to what Linux uses */
91 #define barrier() asm volatile("" ::: "memory") macro
97 #define cpu_relax() barrier()
110 barrier(); in busy_wait()
125 * adds a compiler barrier.
128 barrier(); \
134 barrier(); \
138 #define smp_wmb() barrier()
158 barrier(); \ in __read_once_size()
160 barrier(); \ in __read_once_size()
[all …]
virtio_ring_0_9.c
133 /* Barrier A (for pairing) */ in add_inbuf()
140 /* Barrier A (for pairing) */ in add_inbuf()
145 /* Barrier A (for pairing) */ in add_inbuf()
163 /* Barrier B (for pairing) */ in get_buf()
169 /* Barrier B (for pairing) */ in get_buf()
221 /* Barrier D (for pairing) */ in enable_call()
231 /* Barrier C (for pairing) */ in kick_available()
253 /* Barrier C (for pairing) */ in enable_kick()
280 /* Barrier A (for pairing) */ in use_buf()
289 /* Barrier A (for pairing) */ in use_buf()
[all …]
ring.c
130 * add an explicit full barrier to avoid this. in add_inbuf()
132 barrier(); in add_inbuf()
136 /* Barrier A (for pairing) */ in add_inbuf()
151 /* Barrier B (for pairing) */ in get_buf()
182 /* Barrier D (for pairing) */ in enable_call()
192 /* Barrier C (for pairing) */ in kick_available()
214 /* Barrier C (for pairing) */ in enable_kick()
234 /* Barrier A (for pairing) */ in use_buf()
247 /* Barrier B (for pairing) */ in use_buf()
259 /* Barrier D (for pairing) */ in call_used()
/Linux-v6.1/kernel/sched/
membarrier.c
13 * barrier before sending the IPI
19 * The memory barrier at the start of membarrier() on CPU0 is necessary in
22 * CPU1 after the IPI-induced memory barrier:
33 * barrier()
40 * point after (b). If the memory barrier at (a) is omitted, then "x = 1"
45 * The timing of the memory barrier at (a) has to ensure that it executes
46 * before the IPI-induced memory barrier on CPU1.
49 * barrier after completing the IPI
55 * The memory barrier at the end of membarrier() on CPU0 is necessary in
63 * barrier()
[all …]
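
The membarrier.c hits above are from the kernel-side implementation of the membarrier() system call; the comments explain why a full memory barrier is needed both before the IPI is sent and after it completes. For context only (my addition, not part of the search results), the userspace entry point to that machinery is the membarrier(2) syscall, which is invoked roughly like this:

/* Hedged sketch of calling membarrier(2) from userspace; glibc provides no
 * wrapper, so the raw syscall is used.  Command names are those from
 * <linux/membarrier.h>. */
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static long sys_membarrier(int cmd, unsigned int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	/* MEMBARRIER_CMD_QUERY returns a bitmask of supported commands. */
	long mask = sys_membarrier(MEMBARRIER_CMD_QUERY, 0);

	if (mask < 0) {
		perror("membarrier");
		return 1;
	}
	if (mask & MEMBARRIER_CMD_GLOBAL)
		sys_membarrier(MEMBARRIER_CMD_GLOBAL, 0); /* roughly a system-wide memory barrier */
	printf("supported membarrier commands: 0x%lx\n", mask);
	return 0;
}
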
/Linux-v6.1/arch/s390/include/asm/
barrier.h
30 #define __rmb() barrier()
31 #define __wmb() barrier()
41 barrier(); \
49 barrier(); \
53 #define __smp_mb__before_atomic() barrier()
54 #define __smp_mb__after_atomic() barrier()
80 #include <asm-generic/barrier.h>
/Linux-v6.1/arch/x86/include/asm/
barrier.h
51 /* Prevent speculative execution past this barrier. */
54 #define __dma_rmb() barrier()
55 #define __dma_wmb() barrier()
60 #define __smp_wmb() barrier()
66 barrier(); \
74 barrier(); \
82 #include <asm-generic/barrier.h>
94 * do not require this barrier. This is only required for the
/Linux-v6.1/tools/perf/tests/
sigtrap.c
124 pthread_barrier_t *barrier = (pthread_barrier_t *)arg; in test_thread() local
128 pthread_barrier_wait(barrier); in test_thread()
137 static int run_test_threads(pthread_t *threads, pthread_barrier_t *barrier) in run_test_threads() argument
141 pthread_barrier_wait(barrier); in run_test_threads()
148 static int run_stress_test(int fd, pthread_t *threads, pthread_barrier_t *barrier) in run_stress_test() argument
156 ret = run_test_threads(threads, barrier); in run_stress_test()
178 pthread_barrier_t barrier; in test__sigtrap() local
187 pthread_barrier_init(&barrier, NULL, NUM_THREADS + 1); in test__sigtrap()
210 if (pthread_create(&threads[i], NULL, test_thread, &barrier)) { in test__sigtrap()
216 ret = run_stress_test(fd, threads, &barrier); in test__sigtrap()
[all …]
/Linux-v6.1/arch/mips/mm/
tlb-r3k.c
32 #define BARRIER \ macro
49 entry++; /* BARRIER */ in local_flush_tlb_from()
94 start += PAGE_SIZE; /* BARRIER */ in local_flush_tlb_range()
99 if (idx < 0) /* BARRIER */ in local_flush_tlb_range()
131 start += PAGE_SIZE; /* BARRIER */ in local_flush_tlb_kernel_range()
136 if (idx < 0) /* BARRIER */ in local_flush_tlb_kernel_range()
164 BARRIER; in local_flush_tlb_page()
169 if (idx < 0) /* BARRIER */ in local_flush_tlb_page()
203 BARRIER; in __update_tlb()
208 if (idx < 0) { /* BARRIER */ in __update_tlb()
[all …]
/Linux-v6.1/arch/arc/include/asm/
barrier.h
15 * Explicit barrier provided by DMB instruction
19 * - DMB guarantees SMP as well as local barrier semantics
20 * (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
21 * UP: barrier(), SMP: smp_*mb == *mb)
23 * in the general case. Plus it only provides full barrier.
42 #include <asm-generic/barrier.h>
/Linux-v6.1/arch/ia64/include/asm/
barrier.h
3 * Memory barrier definitions. This is based on information published
48 #define __smp_mb__before_atomic() barrier()
49 #define __smp_mb__after_atomic() barrier()
59 barrier(); \
67 barrier(); \
72 * The group barrier in front of the rsm & ssm are necessary to ensure
77 #include <asm-generic/barrier.h>
/Linux-v6.1/tools/arch/sparc/include/asm/
barrier_64.h
8 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
12 * It used to be believed that the memory barrier had to be right in the
13 * delay slot, but a case has been traced recently wherein the memory barrier
25 * the memory barrier explicitly into a "branch always, predicted taken"
45 barrier(); \
52 barrier(); \
/Linux-v6.1/include/asm-generic/
barrier.h
3 * Generic barrier definitions.
61 #define mb() barrier()
113 #define smp_mb() barrier()
117 #define smp_rmb() barrier()
121 #define smp_wmb() barrier()
182 #define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
186 #define smp_mb__before_atomic() barrier()
190 #define smp_mb__after_atomic() barrier()
197 barrier(); \
207 barrier(); \
[all …]
/Linux-v6.1/arch/powerpc/kernel/
smp-tbsync.c
53 barrier(); in smp_generic_take_timebase()
59 barrier(); in smp_generic_take_timebase()
70 barrier(); in smp_generic_take_timebase()
96 barrier(); in start_contest()
99 barrier(); in start_contest()
104 barrier(); in start_contest()
125 barrier(); in smp_generic_give_timebase()
166 barrier(); in smp_generic_give_timebase()
/Linux-v6.1/arch/alpha/include/asm/
irqflags.h
35 barrier(); in arch_local_irq_disable()
41 barrier(); in arch_local_irq_save()
47 barrier(); in arch_local_irq_enable()
53 barrier(); in arch_local_irq_restore()
55 barrier(); in arch_local_irq_restore()
/Linux-v6.1/tools/virtio/asm/
barrier.h
4 #define barrier() asm volatile("" ::: "memory") macro
6 #define virt_rmb() barrier()
7 #define virt_wmb() barrier()
13 barrier(); \
30 #error Please fill in barrier macros
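
barrier() here is the userspace copy of the kernel's compiler barrier: an empty asm statement with a "memory" clobber. As a quick illustration (my sketch, not code from the tree; the variable names are invented), it is used to keep the compiler from reordering or eliminating memory accesses across a point, for example between filling in data and setting a flag:

/* Minimal compiler-barrier sketch modeled on the definition above.
 * Hypothetical example; builds with GCC or Clang. */
#include <stdio.h>

#define barrier() asm volatile("" ::: "memory")

static int data;
static int ready;

static void publish(int v)
{
	data = v;	/* the compiler may not sink this store... */
	barrier();	/* ...past this point, nor hoist the next one above it */
	ready = 1;
}

int main(void)
{
	publish(42);
	printf("data=%d ready=%d\n", data, ready);
	return 0;
}

Note that barrier() constrains only the compiler; the CPU may still reorder the two stores at run time, which is why SMP code pairs data and flag updates with smp_wmb()/smp_rmb() or stronger primitives instead.
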
/Linux-v6.1/tools/build/feature/
test-pthread-barrier.c
7 pthread_barrier_t barrier; in main() local
9 pthread_barrier_init(&barrier, NULL, 1); in main()
10 pthread_barrier_wait(&barrier); in main()
11 return pthread_barrier_destroy(&barrier); in main()
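
This feature probe is nearly complete as shown; filling in the headers gives a self-contained test (the #include line is my guess at the elided top of the file):

/* Reconstructed pthread-barrier feature test; body as in the hits above,
 * header include assumed.  Build with: cc -pthread test.c */
#include <pthread.h>

int main(void)
{
	pthread_barrier_t barrier;

	pthread_barrier_init(&barrier, NULL, 1);
	pthread_barrier_wait(&barrier);
	return pthread_barrier_destroy(&barrier);
}

If the toolchain or libc lacks pthread barriers, this fails to compile or link, which is how the build system detects the feature.
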
/Linux-v6.1/tools/memory-model/Documentation/
ordering.txt
11 1. Barriers (also known as "fences"). A barrier orders some or
40 c. Write memory barrier.
42 d. Read memory barrier.
44 e. Compiler barrier.
60 o The smp_mb() full memory barrier.
67 First, the smp_mb() full memory barrier orders all of the CPU's prior
122 to also rely on its additional full-memory-barrier semantics. Just please
173 Write Memory Barrier
176 The Linux kernel's write memory barrier is smp_wmb(). If a CPU executes
205 Read Memory Barrier
[all …]
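
ordering.txt describes smp_wmb() and smp_rmb() as a pair: the writer orders its stores before the flag, the reader orders its loads after it. Those are kernel-only primitives, so as a loose userspace analogue (my construction, using C11 fences rather than the kernel API) the same pairing looks like this:

/* Userspace analogue of the smp_wmb()/smp_rmb() pairing described in
 * ordering.txt -- illustrative only; the kernel primitives differ in detail.
 * Build with: cc -pthread -std=c11 pairing.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;		/* data published by the writer */
static atomic_int ready;	/* flag the reader polls on     */

static void *writer(void *unused)
{
	(void)unused;
	payload = 42;
	atomic_thread_fence(memory_order_release);	/* plays the smp_wmb() role */
	atomic_store_explicit(&ready, 1, memory_order_relaxed);
	return NULL;
}

static void *reader(void *unused)
{
	(void)unused;
	while (!atomic_load_explicit(&ready, memory_order_relaxed))
		;					/* spin until published */
	atomic_thread_fence(memory_order_acquire);	/* plays the smp_rmb() role */
	printf("payload=%d\n", payload);		/* guaranteed to print 42 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Dropping either fence allows the reader to observe ready == 1 while still seeing a stale payload, which is exactly the failure mode the paired kernel barriers are there to prevent.
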
