| /Linux-v5.4/include/asm-generic/ |
| D | barrier.h | 156 | #ifndef smp_load_acquire |
| D | barrier.h | 157 | #define smp_load_acquire(p) __smp_load_acquire(p) (macro) |
| D | barrier.h | 183 | #ifndef smp_load_acquire |
| D | barrier.h | 184 | #define smp_load_acquire(p) \ (macro) |
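The two truncated macro bodies above are the generic fallbacks: line 157 forwards to an architecture's __smp_load_acquire() when one is provided, and line 184 is the catch-all definition. A minimal sketch of the shape that catch-all takes, reconstructed from the fragments here (the exact v5.4 body differs in its helper and assertion names):

    #ifndef smp_load_acquire
    #define smp_load_acquire(p)                                 \
    ({                                                          \
            typeof(*p) ___p1 = READ_ONCE(*p); /* plain load */  \
            barrier(); /* keep later accesses after the load */ \
            ___p1;                                              \
    })
    #endif

The pattern is the point: a READ_ONCE() followed by a barrier strong enough that no later access in program order can be hoisted above the load.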
|
| /Linux-v5.4/tools/memory-model/litmus-tests/ |
| D | MP+pooncerelease+poacquireonce.litmus | 7 | * smp_load_acquire() provide sufficient ordering for the message-passing |
| D | MP+pooncerelease+poacquireonce.litmus | 24 | r0 = smp_load_acquire(y); |
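The message-passing shape this test verifies, sketched in the litmus tests' C-like notation (reconstructed around the quoted lines 7 and 24; the file's surrounding comments and init block are elided):

    P0(int *x, int *y)
    {
            WRITE_ONCE(*x, 1);              /* write the payload */
            smp_store_release(y, 1);        /* then publish the flag */
    }

    P1(int *x, int *y)
    {
            int r0;
            int r1;

            r0 = smp_load_acquire(y);       /* read the flag */
            r1 = READ_ONCE(*x);             /* ordered after the flag */
    }

    exists (1:r0=1 /\ 1:r1=0)       /* herd7 reports this outcome as never */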
|
| D | MP+polockonce+poacquiresilsil.litmus | 9 | * the smp_load_acquire() executed before the lock was acquired (loosely |
| D | MP+polockonce+poacquiresilsil.litmus | 29 | r1 = smp_load_acquire(x); |
|
| D | MP+polockmbonce+poacquiresilsil.litmus | 9 | * returns false and the second true, we know that the smp_load_acquire() |
| D | MP+polockmbonce+poacquiresilsil.litmus | 30 | r1 = smp_load_acquire(x); |
|
| D | ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus | 26 | r0 = smp_load_acquire(y); |
| D | ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus | 35 | r0 = smp_load_acquire(z); |
|
| D | S+fencewmbonceonce+poacquireonce.litmus | 23 | r0 = smp_load_acquire(y); |
|
| D | LB+poacquireonce+pooncerelease.litmus | 25 | r0 = smp_load_acquire(y); |
|
| D | MP+fencewmbonceonce+fencermbonceonce.litmus | 8 | * is usually better to use smp_store_release() and smp_load_acquire(). |
|
| D | S+poonceonces.litmus | 8 | * is replaced by WRITE_ONCE() and the smp_load_acquire() replaced by |
|
| D | ISA2+poonceonces.litmus | 9 | * of the smp_load_acquire() invocations are replaced by READ_ONCE()? |
|
| /Linux-v5.4/tools/include/asm/ |
| D | barrier.h | 58 | #ifndef smp_load_acquire |
| D | barrier.h | 59 | # define smp_load_acquire(p) \ (macro) |
|
| /Linux-v5.4/lib/ |
| D | stackdepot.c | 82 | if (smp_load_acquire(&next_slab_inited)) (in init_stack_slab()) |
| D | stackdepot.c | 241 | found = find_stack(smp_load_acquire(bucket), entries, (in stack_depot_save()) |
| D | stackdepot.c | 254 | if (unlikely(!smp_load_acquire(&next_slab_inited))) { (in stack_depot_save()) |
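All three stackdepot hits are one idiom: a reader takes an acquire load of a flag or pointer that an initializer published with smp_store_release(), so seeing the flag set implies seeing the initialization it guards. A hypothetical sketch of that pairing (illustrative names, not the stackdepot code itself):

    static void *slab;
    static int slab_inited;

    static void publish_slab(void *mem)
    {
            slab = mem;                             /* A: initialize */
            smp_store_release(&slab_inited, 1);     /* B: publish after A */
    }

    static void *try_get_slab(void)
    {
            if (!smp_load_acquire(&slab_inited))    /* pairs with B */
                    return NULL;
            return slab;            /* guaranteed to observe A's store */
    }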
|
| D | llist.c | 57 | entry = smp_load_acquire(&head->first); (in llist_del_first()) |
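Here the acquire load of head->first guarantees that a consumer which sees a freshly pushed node also sees that node's fields, which the producer wrote before linking it in. A simplified single-consumer sketch of the idea (the real llist_del_first() loops with cmpxchg to cope with concurrent deleters):

    struct node {
            struct node *next;
            int payload;
    };

    static struct node *pop_first(struct node **first)
    {
            struct node *entry = smp_load_acquire(first);

            if (!entry)
                    return NULL;
            /* entry->payload, written before the push, is visible here */
            WRITE_ONCE(*first, entry->next);
            return entry;
    }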
|
| /Linux-v5.4/arch/arm/include/asm/ |
| D | mcs_spinlock.h | 13 | while (!(smp_load_acquire(lock))) \ |
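This is the acquire spin-wait idiom: the waiter spins until the previous lock holder hands the lock over, and the acquire semantics on the winning load order the whole critical section after the handover store. A minimal sketch, assuming the owner releases with smp_store_release():

    static inline void wait_for_grant(int *lock)
    {
            /* pairs with the owner's smp_store_release(lock, 1) */
            while (!smp_load_acquire(lock))
                    cpu_relax();    /* arm can sleep in WFE instead */
    }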
|
| /Linux-v5.4/scripts/atomic/fallbacks/ |
| D | read_acquire | 5 | return smp_load_acquire(&(v)->counter); |
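This template is expanded by the atomic fallback scripts for architectures that do not supply their own acquire variant; for plain atomic_t it should generate roughly the following (a sketch of the generated helper, not copied from the generated header):

    static inline int
    atomic_read_acquire(const atomic_t *v)
    {
            return smp_load_acquire(&(v)->counter);
    }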
|
| /Linux-v5.4/tools/arch/ia64/include/asm/ |
| D | barrier.h | 55 | #define smp_load_acquire(p) \ (macro) |
|
| /Linux-v5.4/tools/arch/s390/include/asm/ |
| D | barrier.h | 37 | #define smp_load_acquire(p) \ (macro) |
|
| /Linux-v5.4/tools/include/linux/ |
| D | ring_buffer.h | 59 | return smp_load_acquire(&base->data_head); (in ring_buffer_read_head()) |
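This helper is the consumer's half of the perf mmap ring-buffer protocol: acquire the kernel's data_head so every record below it is visible, then release the new data_tail so the kernel may overwrite consumed space. A sketch of a consumer loop built on it, assuming ring_buffer_write_tail() from the same header as the counterpart; real perf records carry their size in a header, so the fixed record_size here is a simplification:

    static void consume_all(struct perf_event_mmap_page *base,
                            void *data, u64 mask, u64 record_size)
    {
            u64 head = ring_buffer_read_head(base); /* acquire data_head */
            u64 tail = base->data_tail;             /* consumer owns the tail */

            while (tail != head) {
                    /* parse one record at data + (tail & mask) ... */
                    tail += record_size;            /* hypothetical fixed size */
            }
            ring_buffer_write_tail(base, tail);     /* release data_tail */
    }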
|
| /Linux-v5.4/tools/arch/powerpc/include/asm/ |
| D | barrier.h | 39 | #define smp_load_acquire(p) \ (macro) |
|
| /Linux-v5.4/tools/arch/sparc/include/asm/ |
| D | barrier_64.h | 49 | #define smp_load_acquire(p) \ (macro) |
|
| /Linux-v5.4/tools/arch/x86/include/asm/ |
| D | barrier.h | 39 | #define smp_load_acquire(p) \ (macro) |
|
| /Linux-v5.4/include/rdma/ |
| D | rdmavt_cq.h | 68 | #define RDMA_READ_UAPI_ATOMIC(member) smp_load_acquire(&(member).val) |
|
| /Linux-v5.4/drivers/media/dvb-core/ |
| D | dvb_ringbuffer.c | 60 | return (rbuf->pread == smp_load_acquire(&rbuf->pwrite)); (in dvb_ringbuffer_empty()) |
| D | dvb_ringbuffer.c | 90 | avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread; (in dvb_ringbuffer_avail()) |
| D | dvb_ringbuffer.c | 106 | smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite)); (in dvb_ringbuffer_flush()) |
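These three hits are a single-producer/single-consumer ring: the writer publishes pwrite with smp_store_release() after filling data, and the reader mirrors that with pread, so each acquire load above lets one side trust the bytes the other has published. A sketch of the availability calculation under those assumptions (illustrative struct, not the dvb type):

    struct ring {
            ssize_t size;
            ssize_t pread;          /* owned by the reader */
            ssize_t pwrite;         /* published by the writer */
            u8 *data;
    };

    static ssize_t ring_avail(struct ring *r)
    {
            /* pairs with the writer's smp_store_release(&r->pwrite, ...) */
            ssize_t avail = smp_load_acquire(&r->pwrite) - r->pread;

            if (avail < 0)
                    avail += r->size;       /* indices wrap around */
            return avail;
    }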
|
| /Linux-v5.4/tools/arch/arm64/include/asm/ |
| D | barrier.h | 64 | #define smp_load_acquire(p) \ (macro) |
|
| /Linux-v5.4/tools/memory-model/Documentation/ |
| D | recipes.txt | 216 | Use of smp_store_release() and smp_load_acquire() is one way to force |
| D | recipes.txt | 228 | r0 = smp_load_acquire(&y); |
| D | recipes.txt | 233 | store, while the smp_load_acquire macro orders the load against any |
| D | recipes.txt | 246 | use of smp_store_release() and smp_load_acquire(), except that both |
| D | recipes.txt | 291 | and to use smp_load_acquire() instead of smp_rmb(). However, the older |
| D | recipes.txt | 421 | r0 = smp_load_acquire(y); |
| D | recipes.txt | 427 | r1 = smp_load_acquire(z); |
| D | recipes.txt | 435 | example, ordering would still be preserved if CPU1()'s smp_load_acquire() |
| D | recipes.txt | 450 | r0 = smp_load_acquire(y); |
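Lines 421-450 of the recipe quote the chained release-acquire pattern (the ISA2 test listed earlier): each acquire that reads from the preceding release extends the ordering across one more CPU. A compact sketch of the chain, with illustrative function names:

    int x, y, z, r0, r1, r2;

    void cpu0(void)
    {
            WRITE_ONCE(x, 1);
            smp_store_release(&y, 1);       /* release after writing x */
    }

    void cpu1(void)
    {
            r0 = smp_load_acquire(&y);      /* reads from cpu0's release */
            smp_store_release(&z, 1);       /* passes the ordering on */
    }

    void cpu2(void)
    {
            r1 = smp_load_acquire(&z);      /* reads from cpu1's release */
            r2 = READ_ONCE(x);
    }

    /* If r0 == 1 and r1 == 1, then r2 == 1 is guaranteed. */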
|