/Linux-v4.19/Documentation/translations/ko_KR/ |
D | memory-barriers.txt |
    261  Q = READ_ONCE(P); D = READ_ONCE(*Q);
    268  READ_ONCE() is also required to emit a memory-barrier instruction, so a DEC Alpha CPU
    273  whether it is run on DEC Alpha or not, READ_ONCE() protects against adverse effects from the compiler
    279  a = READ_ONCE(*X); WRITE_ONCE(*X, b);
    287  WRITE_ONCE(*X, c); d = READ_ONCE(*X);
    298  (*) the compiler may take memory accesses not protected by READ_ONCE() or WRITE_ONCE() and
    582  as of Linux kernel v4.15, smp_read_barrier_depends() was added to READ_ONCE(),
    584  means only the people who write such dedicated code and the people who implement READ_ONCE() itself.
    598  Q = READ_ONCE(P);
    625  Q = READ_ONCE(P);
    [all …]
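The fragments above come from the Korean translation of memory-barriers.txt and describe the marked-access pattern in which a pointer and its target are each loaded with READ_ONCE(). The following user-space sketch illustrates that pattern; the READ_ONCE()/WRITE_ONCE() macros below are simplified stand-ins for the kernel's definitions (they model only the volatile access, not the DEC Alpha barrier), and the variables are invented for the example.

#include <stdio.h>

/* Simplified user-space stand-ins for the kernel macros. */
#define READ_ONCE(x)        (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)  (*(volatile __typeof__(x) *)&(x) = (val))

static int data = 42;
static int *P = &data;

int main(void)
{
        int *Q;
        int D;

        Q = READ_ONCE(P);       /* load the pointer exactly once */
        D = READ_ONCE(*Q);      /* dereference that same loaded pointer */
        printf("D = %d\n", D);

        WRITE_ONCE(*Q, 43);     /* a marked store, as in the doc's examples */
        return 0;
}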
|
/Linux-v4.19/arch/arm64/mm/ |
D | mmu.c |
    136  pte_t old_pte = READ_ONCE(*ptep);  in init_pte()
    145  READ_ONCE(pte_val(*ptep))));  in init_pte()
    160  pmd_t pmd = READ_ONCE(*pmdp);  in alloc_init_cont_pte()
    168  pmd = READ_ONCE(*pmdp);  in alloc_init_cont_pte()
    197  pmd_t old_pmd = READ_ONCE(*pmdp);  in init_pmd()
    211  READ_ONCE(pmd_val(*pmdp))));  in init_pmd()
    217  pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));  in init_pmd()
    231  pud_t pud = READ_ONCE(*pudp);  in alloc_init_cont_pmd()
    242  pud = READ_ONCE(*pudp);  in alloc_init_cont_pmd()
    281  pgd_t pgd = READ_ONCE(*pgdp);  in alloc_init_pud()
    [all …]
|
D | pageattr.c |
    32   pte_t pte = READ_ONCE(*ptep);  in change_page_range()
    166  if (pgd_none(READ_ONCE(*pgdp)))  in kernel_page_present()
    170  pud = READ_ONCE(*pudp);  in kernel_page_present()
    177  pmd = READ_ONCE(*pmdp);  in kernel_page_present()
    184  return pte_valid(READ_ONCE(*ptep));  in kernel_page_present()
|
D | kasan_init.c |
    50   if (pmd_none(READ_ONCE(*pmdp))) {  in kasan_pte_offset()
    63   if (pud_none(READ_ONCE(*pudp))) {  in kasan_pmd_offset()
    75   if (pgd_none(READ_ONCE(*pgdp))) {  in kasan_pud_offset()
    95   } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));  in kasan_pte_populate()
    107  } while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));  in kasan_pmd_populate()
    119  } while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));  in kasan_pud_populate()
    164  set_pgd(pgdp_new, READ_ONCE(*pgdp));  in kasan_copy_shadow()
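The arm64 page-table walkers above (mmu.c, pageattr.c, kasan_init.c) all snapshot a table entry exactly once and then test only that snapshot, so a concurrent update cannot be observed halfway through the checks. A minimal sketch of the idiom, using simplified pmd_t/pmd_none()/pmd_table() stand-ins rather than the real arm64 definitions:

#include <stdint.h>
#include <stdbool.h>

#define READ_ONCE(x)  (*(const volatile __typeof__(x) *)&(x))

typedef struct { uint64_t val; } pmd_t;                 /* illustrative only */

static inline bool pmd_none(pmd_t pmd)  { return pmd.val == 0; }
static inline bool pmd_table(pmd_t pmd) { return (pmd.val & 0x3) == 0x3; }

/* Load the entry once; every later check uses the same snapshot. */
bool entry_points_to_table(pmd_t *pmdp)
{
        pmd_t pmd = READ_ONCE(*pmdp);

        if (pmd_none(pmd))
                return false;
        return pmd_table(pmd);
}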
|
/Linux-v4.19/arch/s390/kernel/ |
D | idle.c |
    61   idle_count = READ_ONCE(idle->idle_count);  in show_idle_count()
    62   if (READ_ONCE(idle->clock_idle_enter))  in show_idle_count()
    79   idle_time = READ_ONCE(idle->idle_time);  in show_idle_time()
    80   idle_enter = READ_ONCE(idle->clock_idle_enter);  in show_idle_time()
    81   idle_exit = READ_ONCE(idle->clock_idle_exit);  in show_idle_time()
    97   idle_enter = READ_ONCE(idle->clock_idle_enter);  in arch_cpu_idle_time()
    98   idle_exit = READ_ONCE(idle->clock_idle_exit);  in arch_cpu_idle_time()
|
/Linux-v4.19/arch/s390/lib/ |
D | spinlock.c |
    131  old = READ_ONCE(lp->lock);  in arch_spin_lock_queued()
    168  while (READ_ONCE(node->prev) != NULL) {  in arch_spin_lock_queued()
    182  old = READ_ONCE(lp->lock);  in arch_spin_lock_queued()
    202  while ((next = READ_ONCE(node->next)) == NULL)  in arch_spin_lock_queued()
    218  owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);  in arch_spin_lock_classic()
    259  owner = READ_ONCE(lp->lock);  in arch_spin_trylock_retry()
    273  while (READ_ONCE(rw->cnts) & 0x10000)  in arch_read_lock_wait()
    285  while (READ_ONCE(rw->cnts) & 0x10000)  in arch_read_lock_wait()
    302  old = READ_ONCE(rw->cnts);  in arch_write_lock_wait()
    318  cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;  in arch_spin_relax()
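Several of the loops in spinlock.c above spin until another CPU updates a shared field, for example READ_ONCE(node->prev) in arch_spin_lock_queued(). A sketch of that busy-wait idiom with an invented MCS-style node and a stubbed cpu_relax(); without READ_ONCE() the compiler could legally hoist the load out of the loop and spin forever on a stale register value.

#include <stddef.h>

#define READ_ONCE(x)  (*(const volatile __typeof__(x) *)&(x))

struct mcs_node {
        struct mcs_node *prev;          /* written by another CPU */
};

static inline void cpu_relax(void) { }  /* stand-in for the arch pause hint */

void wait_for_predecessor(struct mcs_node *node)
{
        /* the load is redone on every iteration */
        while (READ_ONCE(node->prev) != NULL)
                cpu_relax();
}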
|
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx4/ |
D | en_port.c |
    165  packets += READ_ONCE(ring->packets);  in mlx4_en_fold_software_stats()
    166  bytes += READ_ONCE(ring->bytes);  in mlx4_en_fold_software_stats()
    176  packets += READ_ONCE(ring->packets);  in mlx4_en_fold_software_stats()
    177  bytes += READ_ONCE(ring->bytes);  in mlx4_en_fold_software_stats()
    252  sw_rx_dropped += READ_ONCE(ring->dropped);  in mlx4_en_DUMP_ETH_STATS()
    253  priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);  in mlx4_en_DUMP_ETH_STATS()
    254  priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);  in mlx4_en_DUMP_ETH_STATS()
    255  priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);  in mlx4_en_DUMP_ETH_STATS()
    256  priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);  in mlx4_en_DUMP_ETH_STATS()
    257  priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);  in mlx4_en_DUMP_ETH_STATS()
    [all …]
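mlx4_en_fold_software_stats() above folds per-ring counters that other CPUs keep updating while the fold runs. A sketch of that pattern with invented structures; READ_ONCE() forces one real load per counter so the compiler cannot re-read or fuse the accesses, and the resulting totals are understood to be only approximately in sync across fields.

#include <stdint.h>

#define READ_ONCE(x)  (*(const volatile __typeof__(x) *)&(x))

struct ring_stats {
        uint64_t packets;       /* updated only by the CPU servicing the ring */
        uint64_t bytes;
};

void fold_stats(struct ring_stats *rings, int nrings,
                uint64_t *packets_out, uint64_t *bytes_out)
{
        uint64_t packets = 0, bytes = 0;
        int i;

        for (i = 0; i < nrings; i++) {
                packets += READ_ONCE(rings[i].packets);
                bytes   += READ_ONCE(rings[i].bytes);
        }
        *packets_out = packets;
        *bytes_out   = bytes;
}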
|
/Linux-v4.19/arch/s390/include/asm/ |
D | preempt.h |
    15   return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;  in preempt_count()
    23   old = READ_ONCE(S390_lowcore.preempt_count);  in preempt_count_set()
    48   return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);  in test_preempt_need_resched()
    71   return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==  in should_resched()
    81   return READ_ONCE(S390_lowcore.preempt_count);  in preempt_count()
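The s390 helpers above keep the preemption count and the need-resched hint in the same lowcore word, so every reader loads that word once with READ_ONCE() and masks accordingly. A cut-down sketch with an invented lowcore layout; note that the hint is active-low on s390, which is why test_preempt_need_resched() tests for a cleared bit.

#define READ_ONCE(x)  (*(const volatile __typeof__(x) *)&(x))

#define PREEMPT_NEED_RESCHED  0x80000000u

struct lowcore {
        unsigned int preempt_count;     /* count and resched hint share one word */
};

static struct lowcore lowcore;

static inline int preempt_count(void)
{
        return READ_ONCE(lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
}

/* active-low: a cleared bit means "reschedule needed" */
static inline int test_preempt_need_resched(void)
{
        return !(READ_ONCE(lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
}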
|
/Linux-v4.19/tools/memory-model/litmus-tests/ |
D | SB+rfionceonce-poonceonces.litmus |
    17   r1 = READ_ONCE(*x);
    18   r2 = READ_ONCE(*y);
    27   r3 = READ_ONCE(*y);
    28   r4 = READ_ONCE(*x);
|
D | ISA2+poonceonces.litmus |
    9    * of the smp_load_acquire() invocations are replaced by READ_ONCE()?
    24   r0 = READ_ONCE(*y);
    33   r0 = READ_ONCE(*z);
    34   r1 = READ_ONCE(*x);
|
D | IRIW+poonceonces+OnceOnce.litmus |
    25   r0 = READ_ONCE(*x);
    26   r1 = READ_ONCE(*y);
    39   r0 = READ_ONCE(*y);
    40   r1 = READ_ONCE(*x);
|
D | IRIW+fencembonceonces+OnceOnce.litmus |
    25   r0 = READ_ONCE(*x);
    27   r1 = READ_ONCE(*y);
    40   r0 = READ_ONCE(*y);
    42   r1 = READ_ONCE(*x);
|
D | WRC+poonceonces+Once.litmus |
    22   r0 = READ_ONCE(*x);
    31   r0 = READ_ONCE(*y);
    32   r1 = READ_ONCE(*x);
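For readers unfamiliar with the format of the litmus tests listed above, here is a minimal sketch in the same C-flavoured litmus syntax. It follows the plain store-buffering (SB) shape using only READ_ONCE()/WRITE_ONCE() accesses; it is a simplified companion to these files, not the verbatim contents of any of them, and the exists clause asks whether both reads can miss the other process's store.

C SB+poonceonces-sketch

{}

P0(int *x, int *y)
{
        int r0;

        WRITE_ONCE(*x, 1);
        r0 = READ_ONCE(*y);
}

P1(int *x, int *y)
{
        int r0;

        WRITE_ONCE(*y, 1);
        r0 = READ_ONCE(*x);
}

exists (0:r0=0 /\ 1:r0=0)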
|
/Linux-v4.19/drivers/lightnvm/ |
D | pblk-rb.c |
    145  flags = READ_ONCE(w_ctx->flags);  in clean_wctx()
    165  unsigned int mem = READ_ONCE(rb->mem);  in pblk_rb_space()
    166  unsigned int sync = READ_ONCE(rb->sync);  in pblk_rb_space()
    177  unsigned int mem = READ_ONCE(rb->mem);  in pblk_rb_read_count()
    178  unsigned int subm = READ_ONCE(rb->subm);  in pblk_rb_read_count()
    185  unsigned int mem = READ_ONCE(rb->mem);  in pblk_rb_sync_count()
    186  unsigned int sync = READ_ONCE(rb->sync);  in pblk_rb_sync_count()
    195  subm = READ_ONCE(rb->subm);  in pblk_rb_read_commit()
    217  flags = READ_ONCE(entry->w_ctx.flags);  in __pblk_rb_update_l2p()
    310  flags = READ_ONCE(entry->w_ctx.flags);  in pblk_rb_write_entry_user()
    [all …]
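pblk_rb_space() above computes free space from a producer index (mem) and a consumer index (sync) that are advanced by different contexts, sampling each exactly once before doing the arithmetic. A sketch of that calculation with an invented ring structure; the power-of-two sizing and the CIRC_SPACE-style formula are assumptions of this sketch, not a statement about the pblk implementation.

#define READ_ONCE(x)  (*(const volatile __typeof__(x) *)&(x))

struct rb {
        unsigned int mem;               /* producer index */
        unsigned int sync;              /* consumer index */
        unsigned int nr_entries;        /* assumed to be a power of two */
};

static unsigned int ring_space(unsigned int size,
                               unsigned int head, unsigned int tail)
{
        return (tail - head - 1) & (size - 1);
}

unsigned int rb_space(struct rb *rb)
{
        /* sample each shared index exactly once */
        unsigned int mem  = READ_ONCE(rb->mem);
        unsigned int sync = READ_ONCE(rb->sync);

        return ring_space(rb->nr_entries, mem, sync);
}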
|
/Linux-v4.19/drivers/powercap/ |
D | idle_inject.c |
    111  duration_ms = READ_ONCE(ii_dev->run_duration_ms);  in idle_inject_timer_fn()
    112  duration_ms += READ_ONCE(ii_dev->idle_duration_ms);  in idle_inject_timer_fn()
    141  play_idle(READ_ONCE(ii_dev->idle_duration_ms));  in idle_inject_fn()
    168  *run_duration_ms = READ_ONCE(ii_dev->run_duration_ms);  in idle_inject_get_duration()
    169  *idle_duration_ms = READ_ONCE(ii_dev->idle_duration_ms);  in idle_inject_get_duration()
    184  unsigned int idle_duration_ms = READ_ONCE(ii_dev->idle_duration_ms);  in idle_inject_start()
    185  unsigned int run_duration_ms = READ_ONCE(ii_dev->run_duration_ms);  in idle_inject_start()
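In idle_inject_timer_fn() above, the two duration fields can be changed concurrently while the timer is running, so each is sampled once per timer expiry and the period is computed from those samples. A cut-down sketch with an invented structure:

#define READ_ONCE(x)  (*(const volatile __typeof__(x) *)&(x))

struct idle_inject_device {
        unsigned int idle_duration_ms;
        unsigned int run_duration_ms;
};

/* Period of the injection timer, computed from one sample of each field. */
unsigned int next_period_ms(struct idle_inject_device *ii_dev)
{
        unsigned int duration_ms;

        duration_ms  = READ_ONCE(ii_dev->run_duration_ms);
        duration_ms += READ_ONCE(ii_dev->idle_duration_ms);
        return duration_ms;
}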
|
/Linux-v4.19/include/linux/ |
D | srcutiny.h |
    75   idx = READ_ONCE(sp->srcu_idx);  in __srcu_read_lock()
    96   idx = READ_ONCE(sp->srcu_idx) & 0x1;  in srcu_torture_stats_print()
    99   READ_ONCE(sp->srcu_lock_nesting[!idx]),  in srcu_torture_stats_print()
    100  READ_ONCE(sp->srcu_lock_nesting[idx]));  in srcu_torture_stats_print()
|
/Linux-v4.19/lib/ |
D | errseq.c |
    71   old = READ_ONCE(*eseq);  in errseq_set()
    124  errseq_t old = READ_ONCE(*eseq);  in errseq_sample()
    146  errseq_t cur = READ_ONCE(*eseq);  in errseq_check()
    184  old = READ_ONCE(*eseq);  in errseq_check_and_advance()
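errseq_sample() and errseq_check() above implement a sample-then-compare protocol on a shared error cursor: take one snapshot, and later load the cursor once more to see whether it has moved. A generic sketch of that shape, leaving out the real errseq_t bit encoding (sequence counter, error code and SEEN flag):

#define READ_ONCE(x)  (*(const volatile __typeof__(x) *)&(x))

typedef unsigned int errseq_t;

/* Take a snapshot of the cursor, e.g. when a file is opened. */
errseq_t sample(errseq_t *eseq)
{
        return READ_ONCE(*eseq);
}

/* Later: has the cursor moved since the snapshot was taken? */
int check(errseq_t *eseq, errseq_t since)
{
        errseq_t cur = READ_ONCE(*eseq);

        return cur != since;    /* non-zero: new errors were recorded */
}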
|
/Linux-v4.19/kernel/locking/ |
D | qspinlock_paravirt.h |
    123  return !READ_ONCE(lock->locked) &&  in trylock_clear_pending()
    248  if (READ_ONCE(he->lock) == lock) {  in pv_unhash()
    249  node = READ_ONCE(he->node);  in pv_unhash()
    274  return READ_ONCE(prev->state) != vcpu_running || vcpu_is_preempted(prev->cpu);  in pv_wait_early()
    304  if (READ_ONCE(node->locked))  in pv_wait_node()
    324  if (!READ_ONCE(node->locked)) {  in pv_wait_node()
    344  qstat_inc(qstat_pv_spurious_wakeup, !READ_ONCE(node->locked));  in pv_wait_node()
    415  if (READ_ONCE(pn->state) == vcpu_hashed)  in pv_wait_head_or_lock()
|
/Linux-v4.19/drivers/xen/ |
D | time.c |
    42   h = READ_ONCE(p32[1]);  in get64()
    43   l = READ_ONCE(p32[0]);  in get64()
    44   h2 = READ_ONCE(p32[1]);  in get64()
    49   ret = READ_ONCE(*p);  in get64()
    67   *res = READ_ONCE(*state);  in xen_get_runstate_snapshot_cpu_delta()
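get64() above reads a 64-bit value that another CPU may be updating; on 32-bit it does so as high/low/high word loads and retries until the two high-word samples match, so a torn value is never returned. A user-space sketch of that pattern, assuming a little-endian word layout:

#include <stdint.h>

#define READ_ONCE(x)  (*(const volatile __typeof__(x) *)&(x))

uint64_t get64(uint64_t *p)
{
        uint64_t ret;

        if (sizeof(long) == 8) {
                /* a single aligned load cannot be torn here */
                ret = READ_ONCE(*p);
        } else {
                uint32_t *p32 = (uint32_t *)p;
                uint32_t h, l, h2;

                do {
                        h  = READ_ONCE(p32[1]);   /* high word */
                        l  = READ_ONCE(p32[0]);   /* low word */
                        h2 = READ_ONCE(p32[1]);   /* high word again */
                } while (h2 != h);                /* retry if a carry slipped in */

                ret = ((uint64_t)h << 32) | l;
        }
        return ret;
}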
|
/Linux-v4.19/net/rxrpc/ |
D | proc.c |
    83   local = READ_ONCE(rx->local);  in rxrpc_call_seq_show()
    99   timeout = READ_ONCE(call->expect_rx_by);  in rxrpc_call_seq_show()
    103  tx_hard_ack = READ_ONCE(call->tx_hard_ack);  in rxrpc_call_seq_show()
    104  rx_hard_ack = READ_ONCE(call->rx_hard_ack);  in rxrpc_call_seq_show()
    118  tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack,  in rxrpc_call_seq_show()
    119  rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack,  in rxrpc_call_seq_show()
|
/Linux-v4.19/kernel/rcu/ |
D | tree.c |
    212   return READ_ONCE(rnp->qsmaskinitnext);  in rcu_rnp_online_cpus()
    558   return READ_ONCE(rcu_state_p->gp_seq);  in rcu_get_gp_seq()
    567   return READ_ONCE(rcu_sched_state.gp_seq);  in rcu_sched_get_gp_seq()
    576   return READ_ONCE(rcu_bh_state.gp_seq);  in rcu_bh_get_gp_seq()
    688   *flags = READ_ONCE(rsp->gp_flags);  in rcutorture_get_gp_data()
    1196  READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&  in rcu_implicit_dynticks_qs()
    1245  if (!READ_ONCE(*rnhqp) &&  in rcu_implicit_dynticks_qs()
    1285  rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);  in record_gp_stall_check_time()
    1307  gpa = READ_ONCE(rsp->gp_activity);  in rcu_check_gp_kthread_starvation()
    1356  j = READ_ONCE(rsp->jiffies_kick_kthreads);  in rcu_stall_kick_kthreads()
    [all …]
|
D | srcutiny.c |
    112  if (!newval && READ_ONCE(sp->srcu_gp_waiting))  in __srcu_read_unlock()
    130  if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))  in srcu_drive_gp()
    143  swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));  in srcu_drive_gp()
    162  if (READ_ONCE(sp->srcu_cb_head))  in srcu_drive_gp()
    182  if (!READ_ONCE(sp->srcu_gp_running))  in call_srcu()
|
D | update.c |
    135  return READ_ONCE(rcu_normal) &&  in rcu_gp_is_normal()
    460  int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);  in rcu_jiffies_till_stall_check()
    568  if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))  in call_rcu_tasks()
    636  if (!READ_ONCE(t->rcu_tasks_holdout) ||  in check_holdout_task()
    637  t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||  in check_holdout_task()
    638  !READ_ONCE(t->on_rq) ||  in check_holdout_task()
    727  if (t != current && READ_ONCE(t->on_rq) &&  in rcu_tasks_kthread()
    730  t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);  in rcu_tasks_kthread()
    772  rtst = READ_ONCE(rcu_task_stall_timeout);  in rcu_tasks_kthread()
|
D | srcutree.c |
    263  sum += READ_ONCE(cpuc->srcu_lock_count[idx]);  in srcu_readers_lock_idx()
    280  sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);  in srcu_readers_unlock_idx()
    349  sum += READ_ONCE(cpuc->srcu_lock_count[0]);  in srcu_readers_active()
    350  sum += READ_ONCE(cpuc->srcu_lock_count[1]);  in srcu_readers_active()
    351  sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);  in srcu_readers_active()
    352  sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);  in srcu_readers_active()
    365  if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),  in srcu_get_delay()
    366  READ_ONCE(sp->srcu_gp_seq_needed_exp)))  in srcu_get_delay()
    393  if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||  in _cleanup_srcu_struct()
    396  __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));  in _cleanup_srcu_struct()
    [all …]
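srcu_readers_lock_idx() and srcu_readers_active() above sum counters that each CPU updates only for itself, loading every slot exactly once with READ_ONCE(). A sketch of that per-CPU summation, using a plain array in place of real per-CPU data:

#define READ_ONCE(x)  (*(const volatile __typeof__(x) *)&(x))

#define NR_CPUS  4      /* illustrative */

struct srcu_cpu_counts {
        unsigned long lock_count[2];
        unsigned long unlock_count[2];
};

static struct srcu_cpu_counts srcu_counts[NR_CPUS];

/* Sum one flavour of counter across CPUs, loading each slot exactly once. */
unsigned long readers_lock_idx(int idx)
{
        unsigned long sum = 0;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                sum += READ_ONCE(srcu_counts[cpu].lock_count[idx]);
        return sum;
}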
|
/Linux-v4.19/arch/arm64/kernel/ |
D | hibernate.c |
    220  if (pgd_none(READ_ONCE(*pgdp))) {  in create_safe_exec_page()
    230  if (pud_none(READ_ONCE(*pudp))) {  in create_safe_exec_page()
    240  if (pmd_none(READ_ONCE(*pmdp))) {  in create_safe_exec_page()
    336  pte_t pte = READ_ONCE(*src_ptep);  in _copy_pte()
    390  if (pud_none(READ_ONCE(*dst_pudp))) {  in copy_pmd()
    400  pmd_t pmd = READ_ONCE(*src_pmdp);  in copy_pmd()
    425  if (pgd_none(READ_ONCE(*dst_pgdp))) {  in copy_pud()
    435  pud_t pud = READ_ONCE(*src_pudp);  in copy_pud()
    462  if (pgd_none(READ_ONCE(*src_pgdp)))  in copy_page_tables()
|