/Linux-v5.15/kernel/rcu/

  tree_stall.h
      451   data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,   in print_cpu_stall_info()
      468   data_race(READ_ONCE(rcu_state.gp_flags)),   in rcu_check_gp_kthread_starvation()
      470   data_race(READ_ONCE(rcu_state.gp_state)),   in rcu_check_gp_kthread_starvation()
      471   gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);   in rcu_check_gp_kthread_starvation()
      512   data_race(rcu_state.gp_flags),   in rcu_check_gp_kthread_expired_fqs_timer()
      514   data_race(READ_ONCE(gpk->__state)));   in rcu_check_gp_kthread_expired_fqs_timer()
      573   gpa = data_race(READ_ONCE(rcu_state.gp_activity));   in print_other_cpu_stall()
      576   data_race(READ_ONCE(jiffies_till_next_fqs)),   in print_other_cpu_stall()
      577   data_race(READ_ONCE(rcu_get_root()->qsmask)));   in print_other_cpu_stall()
      770   if (data_race(READ_ONCE(rnp->qsmask))) {   in rcu_check_boost_fail()
      [all …]

  tasks.h
      142   int i = data_race(rtp->gp_state); // Let KCSAN detect update races   in tasks_gp_state_getname()
      285   tasks_gp_state_getname(rtp), data_race(rtp->gp_state),   in show_rcu_tasks_generic_gp_kthread()
      286   jiffies - data_race(rtp->gp_jiffies),   in show_rcu_tasks_generic_gp_kthread()
      287   data_race(rtp->n_gps),   in show_rcu_tasks_generic_gp_kthread()
      288   data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),   in show_rcu_tasks_generic_gp_kthread()
      289   ".k"[!!data_race(rtp->kthread_ptr)],   in show_rcu_tasks_generic_gp_kthread()
      290   ".C"[!!data_race(rtp->cbs_head)],   in show_rcu_tasks_generic_gp_kthread()
      1295  data_race(n_heavy_reader_ofl_updates),   in show_rcu_tasks_trace_gp_kthread()
      1296  data_race(n_heavy_reader_updates),   in show_rcu_tasks_trace_gp_kthread()
      1297  data_race(n_heavy_reader_attempts));   in show_rcu_tasks_trace_gp_kthread()

  tree_exp.h
      546   data_race(rnp_root->expmask),   in synchronize_rcu_expedited_wait()
      547   ".T"[!!data_race(rnp_root->exp_tasks)]);   in synchronize_rcu_expedited_wait()
      557   data_race(rnp->expmask),   in synchronize_rcu_expedited_wait()
      558   ".T"[!!data_race(rnp->exp_tasks)]);   in synchronize_rcu_expedited_wait()

  srcutree.c
      1356  u0 = data_race(sdp->srcu_unlock_count[!idx]);   in srcu_torture_stats_print()
      1357  u1 = data_race(sdp->srcu_unlock_count[idx]);   in srcu_torture_stats_print()
      1365  l0 = data_race(sdp->srcu_lock_count[!idx]);   in srcu_torture_stats_print()
      1366  l1 = data_race(sdp->srcu_lock_count[idx]);   in srcu_torture_stats_print()

  rcutorture.c
      1787  data_race(n_barrier_successes),   in rcu_torture_stats_print()
      1788  data_race(n_barrier_attempts),   in rcu_torture_stats_print()
      1789  data_race(n_rcu_torture_barrier_error));   in rcu_torture_stats_print()
      1790  pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.   in rcu_torture_stats_print()

  tree_plugin.h
      786   __func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks),   in dump_blkd_tasks()
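The kernel/rcu/ hits above are almost all diagnostic: the stall-warning and kthread-starvation reporting reads grace-period state that other CPUs update concurrently, and the double marking data_race(READ_ONCE(x)) keeps the load from being torn while telling KCSAN that the race is intentional. Below is a minimal userspace sketch of that reporting pattern; the thread and variable names are made up, and the stand-in macros only approximate the kernel's definitions (with CONFIG_KCSAN=n, data_race() effectively reduces to the bare expression and READ_ONCE() to a volatile load).

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Simplified userspace stand-ins for the kernel macros. */
#define READ_ONCE(x)        (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)  (*(volatile __typeof__(x) *)&(x) = (val))
#define data_race(expr)     (expr)   /* in the kernel: "this race is intentional" */

static unsigned long gp_seq;         /* updated by one thread, printed by another */

static void *updater(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++)
		WRITE_ONCE(gp_seq, gp_seq + 1);   /* the "real" update path */
	return NULL;
}

static void *reporter(void *arg)
{
	(void)arg;
	for (int i = 0; i < 5; i++) {
		/* Diagnostic-only read: racy on purpose, but never torn. */
		printf("gp_seq approx %lu\n", data_race(READ_ONCE(gp_seq)));
		usleep(1000);
	}
	return NULL;
}

int main(void)
{
	pthread_t u, r;

	pthread_create(&u, NULL, updater, NULL);
	pthread_create(&r, NULL, reporter, NULL);
	pthread_join(u, NULL);
	pthread_join(r, NULL);
	return 0;
}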
/Linux-v5.15/tools/memory-model/Documentation/

  access-marking.txt
      19   2. Data-race marking, for example, "data_race(a = b);"
      31   WRITE_ONCE(a, b + data_race(c + d) + READ_ONCE(e));
      33   Neither plain C-language accesses nor data_race() (#1 and #2 above) place
      39   preferable to data_race(), which in turn is usually preferable to plain
      41   data_race(READ_ONCE(a)), which will both restrict compiler optimizations
      46   race with one of data_race(), READ_ONCE(), or WRITE_ONCE(), will prevent
      50   ill-considered additions of data_race(), READ_ONCE(), and WRITE_ONCE()
      54   data_race() and even plain C-language accesses is preferable to
      58   Use of the data_race() Macro
      61   Here are some situations where data_race() should be used instead of
      [all …]
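access-marking.txt is the document that ranks these options: READ_ONCE()/WRITE_ONCE() are usually preferable to data_race(), which in turn is usually preferable to plain C-language accesses, and diagnostic code often wants data_race(READ_ONCE(a)) to get an untorn load that KCSAN nevertheless forgives. A small compilable sketch of the four choices, using a hypothetical shared counter and the same simplified stand-in macros as above:

#include <stdio.h>

/* Stand-ins only: in the kernel, data_race() is a KCSAN annotation and
 * READ_ONCE() expands to a volatile access of the appropriate width. */
#define data_race(expr)  (expr)
#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))

static int shared_counter;   /* hypothetical: imagine other threads updating it */

int main(void)
{
	int a, b, c, d;

	a = shared_counter;                        /* 1. plain access: may be fused, torn, or reloaded; KCSAN flags any concurrent writer */
	b = data_race(shared_counter);             /* 2. KCSAN forgives the race, but codegen is unconstrained */
	c = READ_ONCE(shared_counter);             /* 3. one untorn load; KCSAN still checks it */
	d = data_race(READ_ONCE(shared_counter));  /* 4. untorn load that KCSAN forgives: typical for diagnostics */

	printf("%d %d %d %d\n", a, b, c, d);
	return 0;
}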
/Linux-v5.15/include/linux/

  srcutiny.h
      84   idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;   in srcu_torture_stats_print()
      87   data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),   in srcu_torture_stats_print()
      88   data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])));   in srcu_torture_stats_print()

  compiler.h
      214  #define data_race(expr) \   (macro definition)
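include/linux/compiler.h line 214 is where the macro itself lives. A userspace approximation of its shape is sketched below; the real macro uses __unqual_scalar_typeof() to strip qualifiers and the __kcsan_disable_current()/__kcsan_enable_current() hooks, which are stubbed out here, so treat this as an illustration of the structure rather than a copy of the source.

#include <stdio.h>

static inline void kcsan_disable_current(void) { }   /* stub for __kcsan_disable_current() */
static inline void kcsan_enable_current(void)  { }   /* stub for __kcsan_enable_current() */

/* Evaluate expr with KCSAN checking suppressed, preserving its value.
 * (The kernel uses __unqual_scalar_typeof() where __typeof__ appears here.) */
#define data_race(expr)						\
({								\
	__typeof__(({ expr; })) __v = ({			\
		kcsan_disable_current();			\
		expr;						\
	});							\
	kcsan_enable_current();					\
	__v;							\
})

static unsigned long stat_counter;

int main(void)
{
	data_race(stat_counter++);                    /* statement use: result discarded */
	printf("%lu\n", data_race(stat_counter));     /* expression use: value propagates */
	return 0;
}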
/Linux-v5.15/mm/

  frontswap.c
      65   data_race(frontswap_loads++);   in inc_frontswap_loads()
      69   data_race(frontswap_succ_stores++);   in inc_frontswap_succ_stores()
      73   data_race(frontswap_failed_stores++);   in inc_frontswap_failed_stores()
      77   data_race(frontswap_invalidates++);   in inc_frontswap_invalidates()
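The frontswap counters above are a good example of the tolerated-inaccuracy pattern: the statistics are only ever read for reporting, so a lost increment under contention is acceptable, and data_race(counter++) documents that choice without paying for atomics. A small sketch with made-up names and the same data_race() stand-in:

#include <pthread.h>
#include <stdio.h>

#define data_race(expr) (expr)   /* KCSAN-only annotation in the kernel */

static unsigned long frontswap_loads_stat;   /* illustrative counter */

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		data_race(frontswap_loads_stat++);   /* racy, possibly lossy, and fine */
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	/* The printed value may fall slightly short of 400000; exactness is not needed. */
	printf("approximate loads: %lu\n", frontswap_loads_stat);
	return 0;
}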
  page_io.c
      69   if (data_race(!(sis->flags & SWP_BLKDEV)))   in swap_slot_free_notify()
      293  if (data_race(sis->flags & SWP_FS_OPS)) {   in __swap_writepage()
      382  if (data_race(sis->flags & SWP_FS_OPS)) {   in swap_readpage()
      446  if (data_race(sis->flags & SWP_FS_OPS)) {   in swap_set_page_dirty()

  page_counter.c
      129  data_race(c->failcnt++);   in page_counter_try_charge()

  swap.c
      601  if (data_race(pagevec_count(pvec))) {   in lru_add_drain_cpu()
      820  data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||   in __lru_add_drain_all()

  swap_state.c
      61   #define INC_CACHE_INFO(x) data_race(swap_cache_info.x++)
      62   #define ADD_CACHE_INFO(x, nr) data_race(swap_cache_info.x += (nr))

  swapfile.c
      954   if (data_race(!si->swap_map[offset])) {   in scan_swap_map_slots()
      971   if (data_race(!si->swap_map[offset])) {   in scan_swap_map_slots()
      1134  if (data_race(!(p->flags & SWP_USED)))   in __swap_info_get()
      1160  if (data_race(!p->swap_map[swp_offset(entry)]))   in _swap_info_get()
/Linux-v5.15/security/tomoyo/

  util.c
      1068  data_race(perm = container_of(ptr, struct tomoyo_path_acl, head)->perm);   in tomoyo_domain_quota_is_ok()
      1071  data_race(perm = container_of(ptr, struct tomoyo_path2_acl, head)->perm);   in tomoyo_domain_quota_is_ok()
      1074  data_race(perm = container_of(ptr, struct tomoyo_path_number_acl, head)   in tomoyo_domain_quota_is_ok()
      1078  data_race(perm = container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);   in tomoyo_domain_quota_is_ok()
      1081  data_race(perm = container_of(ptr, struct tomoyo_inet_acl, head)->perm);   in tomoyo_domain_quota_is_ok()
      1084  data_race(perm = container_of(ptr, struct tomoyo_unix_acl, head)->perm);   in tomoyo_domain_quota_is_ok()
/Linux-v5.15/kernel/locking/

  osq_lock.c
      161  if (data_race(prev->next) == node &&   in osq_lock()

  locktorture.c
      744  long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;   in __torture_print_stats()
      749  if (data_race(statp[i].n_lock_fail))   in __torture_print_stats()
      751  cur = data_race(statp[i].n_lock_acquired);   in __torture_print_stats()
/Linux-v5.15/mm/kfence/

  core.c
      844  distance = addr - data_race(meta->addr + meta->size);   in kfence_handle_page_fault()
      850  if (!to_report || distance > data_race(meta->addr) - addr)   in kfence_handle_page_fault()
/Linux-v5.15/kernel/irq/

  irqdesc.c
      952  return data_race(desc->tot_count);   in kstat_irqs()
      955  sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu));   in kstat_irqs()

  proc.c
      493  any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j));   in show_interrupts()
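kstat_irqs() and show_interrupts() sum per-CPU counters while interrupts keep arriving: each CPU only writes its own slot, the reader adds the slots up without synchronization, and a momentarily stale total is accepted. A userspace sketch of that summation, with a fixed-size array and thread-per-"CPU" standing in for the kernel's per-CPU allocation:

#include <pthread.h>
#include <stdio.h>

#define data_race(expr) (expr)   /* kernel: "racy read accepted here" */

#define NCPU 4

static unsigned int kstat_irqs_sim[NCPU];     /* one slot per "CPU" */

static void *cpu_thread(void *arg)
{
	unsigned int *slot = arg;

	for (int i = 0; i < 100000; i++)
		(*slot)++;                    /* only this thread writes this slot */
	return NULL;
}

static unsigned int kstat_irqs_sum(void)
{
	unsigned int sum = 0;

	for (int cpu = 0; cpu < NCPU; cpu++)
		sum += data_race(kstat_irqs_sim[cpu]);   /* advisory, possibly stale */
	return sum;
}

int main(void)
{
	pthread_t t[NCPU];

	for (int i = 0; i < NCPU; i++)
		pthread_create(&t[i], NULL, cpu_thread, &kstat_irqs_sim[i]);
	printf("in-flight total: %u\n", kstat_irqs_sum());   /* approximate snapshot */
	for (int i = 0; i < NCPU; i++)
		pthread_join(t[i], NULL);
	printf("final total: %u\n", kstat_irqs_sum());
	return 0;
}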
/Linux-v5.15/fs/jbd2/

  transaction.c
      368   if (!data_race(journal->j_running_transaction)) {   in start_this_handle()
      1499  if (data_race(jh->b_transaction != transaction &&   in jbd2_journal_dirty_metadata()
      1508  if (data_race(jh->b_transaction == transaction &&   in jbd2_journal_dirty_metadata()
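The start_this_handle() hit illustrates the unlocked pre-check pattern: j_running_transaction is peeked at without the journal state lock purely as a hint about whether a new transaction will probably have to be set up, and the authoritative decision is made later under the lock. A sketch of that shape with illustrative names, a pthread mutex in place of the journal's state lock, and the same data_race() stand-in:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define data_race(expr) (expr)

struct journal_sim {
	pthread_mutex_t state_lock;
	void *running_transaction;     /* NULL until someone starts one */
};

static void *start_transaction_locked(struct journal_sim *j)
{
	/* Authoritative check, under the lock. */
	if (!j->running_transaction)
		j->running_transaction = malloc(64);   /* stands in for real setup */
	return j->running_transaction;
}

static void *start_this_handle_sim(struct journal_sim *j)
{
	if (!data_race(j->running_transaction)) {
		/* Hint only: a transaction will *probably* have to be created, so
		 * expensive preparation could be done here, outside the lock. */
	}

	pthread_mutex_lock(&j->state_lock);
	void *t = start_transaction_locked(j);
	pthread_mutex_unlock(&j->state_lock);
	return t;
}

int main(void)
{
	struct journal_sim j = { PTHREAD_MUTEX_INITIALIZER, NULL };

	printf("transaction %p\n", start_this_handle_sim(&j));
	free(j.running_transaction);
	return 0;
}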
/Linux-v5.15/kernel/

  scftorture.c
      164  invoked_count += data_race(per_cpu(scf_invoked_count, cpu));   in scf_torture_stats_print()
/Linux-v5.15/net/ipv4/

  af_inet.c
      578  if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))   in inet_dgram_connect()
      806  if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&   in inet_send_prepare()
/Linux-v5.15/kernel/kcsan/

  kcsan_test.c
      298  static noinline void test_kernel_data_race(void) { data_race(test_var++); }   in test_kernel_data_race()