
Searched refs:smp_load_acquire (Results 1 – 25 of 88) sorted by relevance


/Linux-v5.4/include/asm-generic/
barrier.h:156  #ifndef smp_load_acquire
barrier.h:157  #define smp_load_acquire(p) __smp_load_acquire(p)  [macro]
barrier.h:183  #ifndef smp_load_acquire
barrier.h:184  #define smp_load_acquire(p) \  [macro]
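
A note for readers tracing the fallback chain above: if an architecture header does not supply smp_load_acquire(), the generic header defines it via __smp_load_acquire(), which amounts to a plain once-load followed by a barrier. Below is a minimal userspace sketch of that "load, then fence" shape using C11 atomics instead of the kernel macros (the helper name is illustrative, not the kernel's):

#include <stdatomic.h>

/*
 * Sketch only: the kernel's generic fallback is built from READ_ONCE()
 * plus smp_mb(); the C11 analogue below needs only an acquire fence
 * after the relaxed load to keep later accesses from moving before it.
 */
static inline int load_acquire_sketch(atomic_int *p)
{
        int v = atomic_load_explicit(p, memory_order_relaxed); /* the once-load */
        atomic_thread_fence(memory_order_acquire);             /* the barrier   */
        return v;
}
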
/Linux-v5.4/tools/memory-model/litmus-tests/
MP+pooncerelease+poacquireonce.litmus:7    * smp_load_acquire() provide sufficient ordering for the message-passing
MP+pooncerelease+poacquireonce.litmus:24   r0 = smp_load_acquire(y);
MP+polockonce+poacquiresilsil.litmus:9     * the smp_load_acquire() executed before the lock was acquired (loosely
MP+polockonce+poacquiresilsil.litmus:29    r1 = smp_load_acquire(x);
MP+polockmbonce+poacquiresilsil.litmus:9   * returns false and the second true, we know that the smp_load_acquire()
MP+polockmbonce+poacquiresilsil.litmus:30  r1 = smp_load_acquire(x);
ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus:26  r0 = smp_load_acquire(y);
ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus:35  r0 = smp_load_acquire(z);
S+fencewmbonceonce+poacquireonce.litmus:23  r0 = smp_load_acquire(y);
LB+poacquireonce+pooncerelease.litmus:25    r0 = smp_load_acquire(y);
MP+fencewmbonceonce+fencermbonceonce.litmus:8  * is usually better to use smp_store_release() and smp_load_acquire().
S+poonceonces.litmus:8    * is replaced by WRITE_ONCE() and the smp_load_acquire() replaced by
ISA2+poonceonces.litmus:9  * of the smp_load_acquire() invocations are replaced by READ_ONCE()?
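
The MP ("message passing") tests above exercise the pattern where a writer publishes a payload and then sets a flag with smp_store_release(), and the reader checks the flag with smp_load_acquire() before reading the payload. Below is a self-contained C11 analogue of that idiom; the variable names x and y mirror the litmus tests, but this is a sketch of the pattern, not one of the litmus files:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int x;    /* payload ("message") */
static atomic_int y;    /* flag                */

static void *writer(void *arg)
{
        (void)arg;
        atomic_store_explicit(&x, 1, memory_order_relaxed);    /* ~ WRITE_ONCE(x, 1)         */
        atomic_store_explicit(&y, 1, memory_order_release);    /* ~ smp_store_release(&y, 1) */
        return NULL;
}

static void *reader(void *arg)
{
        (void)arg;
        int r0 = atomic_load_explicit(&y, memory_order_acquire);  /* ~ smp_load_acquire(&y) */
        int r1 = atomic_load_explicit(&x, memory_order_relaxed);  /* ~ READ_ONCE(x)         */

        if (r0 == 1)
                assert(r1 == 1);  /* forbidden outcome: flag observed but payload missed */
        return NULL;
}

int main(void)
{
        pthread_t w, r;

        pthread_create(&w, NULL, writer, NULL);
        pthread_create(&r, NULL, reader, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        puts("ok");
        return 0;
}
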
/Linux-v5.4/tools/include/asm/
barrier.h:58  #ifndef smp_load_acquire
barrier.h:59  # define smp_load_acquire(p) \  [macro]
/Linux-v5.4/lib/
stackdepot.c:82   if (smp_load_acquire(&next_slab_inited))  in init_stack_slab()
stackdepot.c:241  found = find_stack(smp_load_acquire(bucket), entries,  in stack_depot_save()
stackdepot.c:254  if (unlikely(!smp_load_acquire(&next_slab_inited))) {  in stack_depot_save()
llist.c:57        entry = smp_load_acquire(&head->first);  in llist_del_first()
/Linux-v5.4/arch/arm/include/asm/
mcs_spinlock.h:13  while (!(smp_load_acquire(lock))) \
/Linux-v5.4/scripts/atomic/fallbacks/
read_acquire:5  return smp_load_acquire(&(v)->counter);
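
The read_acquire fallback template above generates atomic_read_acquire() as a load-acquire of the atomic_t counter when an architecture provides no specialised implementation. A hedged C11 sketch of the same idea (the type and function names here are illustrative, not the kernel's):

#include <stdatomic.h>

/* Illustrative stand-in for the kernel's atomic_t. */
typedef struct { atomic_int counter; } my_atomic_t;

/* The generated fallback boils down to a load-acquire of the counter. */
static inline int my_atomic_read_acquire(my_atomic_t *v)
{
        return atomic_load_explicit(&v->counter, memory_order_acquire);
}
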
/Linux-v5.4/tools/arch/ia64/include/asm/
barrier.h:55  #define smp_load_acquire(p) \  [macro]
/Linux-v5.4/tools/arch/s390/include/asm/
barrier.h:37  #define smp_load_acquire(p) \  [macro]
/Linux-v5.4/tools/include/linux/
ring_buffer.h:59  return smp_load_acquire(&base->data_head);  in ring_buffer_read_head()
/Linux-v5.4/tools/arch/powerpc/include/asm/
barrier.h:39  #define smp_load_acquire(p) \  [macro]
/Linux-v5.4/tools/arch/sparc/include/asm/
barrier_64.h:49  #define smp_load_acquire(p) \  [macro]
/Linux-v5.4/tools/arch/x86/include/asm/
barrier.h:39  #define smp_load_acquire(p) \  [macro]
/Linux-v5.4/include/rdma/
rdmavt_cq.h:68  #define RDMA_READ_UAPI_ATOMIC(member) smp_load_acquire(&(member).val)
/Linux-v5.4/drivers/media/dvb-core/
dvb_ringbuffer.c:60   return (rbuf->pread == smp_load_acquire(&rbuf->pwrite));  in dvb_ringbuffer_empty()
dvb_ringbuffer.c:90   avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread;  in dvb_ringbuffer_avail()
dvb_ringbuffer.c:106  smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite));  in dvb_ringbuffer_flush()
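
The dvb_ringbuffer hits above show the single-producer/single-consumer index pairing: the consumer load-acquires the producer index (pwrite) before reading data, and the matching index updates are published with smp_store_release(). A userspace C11 sketch of the empty/available checks in that style (the struct and field names echo the driver, but the code is only illustrative):

#include <stdatomic.h>
#include <stddef.h>

struct ringbuf {
        atomic_size_t pread;    /* consumer index, published with store-release */
        atomic_size_t pwrite;   /* producer index, published with store-release */
        size_t size;            /* buffer size; indices stay in [0, size)       */
};

/* Empty when the consumer has caught up with the producer's published index. */
static int ringbuf_empty(struct ringbuf *rb)
{
        return atomic_load_explicit(&rb->pread, memory_order_relaxed) ==
               atomic_load_explicit(&rb->pwrite, memory_order_acquire);
}

/*
 * Bytes available to read: acquire the producer index so the data written
 * before that index was published is also guaranteed visible.
 */
static size_t ringbuf_avail(struct ringbuf *rb)
{
        size_t w = atomic_load_explicit(&rb->pwrite, memory_order_acquire);
        size_t r = atomic_load_explicit(&rb->pread, memory_order_relaxed);

        return (w >= r) ? w - r : rb->size - r + w;
}
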
/Linux-v5.4/tools/arch/arm64/include/asm/
barrier.h:64  #define smp_load_acquire(p) \  [macro]
/Linux-v5.4/tools/memory-model/Documentation/
recipes.txt:216  Use of smp_store_release() and smp_load_acquire() is one way to force
recipes.txt:228  r0 = smp_load_acquire(&y);
recipes.txt:233  store, while the smp_load_acquire macro orders the load against any
recipes.txt:246  use of smp_store_release() and smp_load_acquire(), except that both
recipes.txt:291  and to use smp_load_acquire() instead of smp_rmb(). However, the older
recipes.txt:421  r0 = smp_load_acquire(y);
recipes.txt:427  r1 = smp_load_acquire(z);
recipes.txt:435  example, ordering would still be preserved if CPU1()'s smp_load_acquire()
recipes.txt:450  r0 = smp_load_acquire(y);
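
The recipes.txt excerpts above describe release/acquire chains: each smp_store_release() hands ordering on to the next CPU's smp_load_acquire(), so a write made before the first release is guaranteed visible after the last acquire. A self-contained C11 sketch of the three-thread chain the recipe discusses (the cpu0/cpu1/cpu2 names are illustrative, not taken from the document):

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

static atomic_int x, y, z;

static void *cpu0(void *arg)    /* publish x, then release y */
{
        (void)arg;
        atomic_store_explicit(&x, 1, memory_order_relaxed);
        atomic_store_explicit(&y, 1, memory_order_release);
        return NULL;
}

static void *cpu1(void *arg)    /* acquire y, pass the baton on through z */
{
        (void)arg;
        if (atomic_load_explicit(&y, memory_order_acquire) == 1)
                atomic_store_explicit(&z, 1, memory_order_release);
        return NULL;
}

static void *cpu2(void *arg)    /* acquire z; if seen, x must also be visible */
{
        (void)arg;
        if (atomic_load_explicit(&z, memory_order_acquire) == 1)
                assert(atomic_load_explicit(&x, memory_order_relaxed) == 1);
        return NULL;
}

int main(void)
{
        pthread_t t0, t1, t2;

        pthread_create(&t0, NULL, cpu0, NULL);
        pthread_create(&t1, NULL, cpu1, NULL);
        pthread_create(&t2, NULL, cpu2, NULL);
        pthread_join(t0, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}
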
