Lines Matching +full:strong +full:- +full:pull +full:- +full:up

1 // SPDX-License-Identifier: GPL-2.0
52 /* Per-CPU kcsan_ctx for interrupts */
61 * The purpose is 2-fold:
69 * given this should be rare, this is a reasonable trade-off to make, since this
85 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
96 * zero-initialized state matches INVALID_WATCHPOINT.
98 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
99 * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
101 static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
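The comments at lines 85-101 describe how an address maps to a primary watchpoint slot and how the fast-path probes adjacent slots without a modulo, because the array carries NUM_SLOTS-1 overflow entries. A minimal userspace sketch of that sizing argument follows; the granule size and table size are stand-ins, and KCSAN_CHECK_ADJACENT = 1 mirrors the kernel default but is an assumption here, not the kernel's exact macros.

    /* Illustration only -- not the kernel's SLOT_IDX/SLOT_IDX_FAST macros. */
    #include <stdio.h>

    #define KCSAN_CHECK_ADJACENT 1                 /* assumed kernel default */
    #define NUM_SLOTS            (1 + 2 * KCSAN_CHECK_ADJACENT)
    #define NUM_WATCHPOINTS      64                /* stand-in for CONFIG_KCSAN_NUM_WATCHPOINTS */
    #define GRANULE              8                 /* stand-in granule size */

    /* NUM_SLOTS-1 overflow entries let the fast path probe indices
     * slot .. slot+NUM_SLOTS-1 without wrapping, even for the last slot. */
    static long watchpoints[NUM_WATCHPOINTS + NUM_SLOTS - 1];

    static int watchpoint_slot(unsigned long addr)
    {
            return (addr / GRANULE) % NUM_WATCHPOINTS;  /* primary slot */
    }

    int main(void)
    {
            unsigned long addr = 0xffffffff12345678UL;
            int slot = watchpoint_slot(addr);
            size_t array_size = sizeof(watchpoints) / sizeof(watchpoints[0]);

            for (int i = 0; i < NUM_SLOTS; ++i)
                    printf("probe %d -> index %d (array size %zu)\n",
                           i, slot + i, array_size);
            return 0;
    }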
105 * per-CPU counter to avoid excessive contention.
156 …BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoi… in insert_watchpoint()
157 …ILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoi… in insert_watchpoint()
177 * 2. the thread that set up the watchpoint already removed it;
178 * 3. the watchpoint was removed and then re-used.
192 /* Remove the watchpoint -- its slot may be reused after. */
204 return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx); in get_ctx()
210 /* Check scoped accesses; never inline because this is a slow-path! */
216 if (ctx->disable_scoped) in kcsan_check_scoped_accesses()
219 ctx->disable_scoped++; in kcsan_check_scoped_accesses()
220 list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) { in kcsan_check_scoped_accesses()
221 check_access(scoped_access->ptr, scoped_access->size, in kcsan_check_scoped_accesses()
222 scoped_access->type, scoped_access->ip); in kcsan_check_scoped_accesses()
224 ctx->disable_scoped--; in kcsan_check_scoped_accesses()
227 /* Rules for generic atomic accesses. Called from fast-path. */
245 return true; /* Assume aligned writes up to word size are atomic. */ in is_atomic()
247 if (ctx->atomic_next > 0) { in is_atomic()
253 * reasonable trade-off to make, since this case should be in is_atomic()
258 --ctx->atomic_next; /* in task, or outer interrupt */ in is_atomic()
262 return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic; in is_atomic()
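Line 245 documents the heuristic that aligned plain writes up to word size may be assumed atomic (when the corresponding config option is enabled). A hedged sketch of just that predicate, omitting the config and access-type checks the kernel performs:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stddef.h>

    /* "Aligned write up to word size" test, illustration only. */
    static bool plain_write_assumed_atomic(const volatile void *ptr, size_t size)
    {
            return size > 0 && size <= sizeof(long) &&
                   ((unsigned long)ptr % size) == 0;   /* aligned to its size */
    }

    int main(void)
    {
            long word;
            char bytes[16];

            printf("%d\n", plain_write_assumed_atomic(&word, sizeof(word)));      /* 1 */
            printf("%d\n", plain_write_assumed_atomic(bytes + 1, sizeof(long)));  /* typically 0 */
            return 0;
    }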
269 * Never set up watchpoints when memory operations are atomic. in should_watch()
291 * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
306 long skip_count = kcsan_skip_watch - in reset_kcsan_skip()
315 return READ_ONCE(kcsan_enabled) && !ctx->disable_count; in kcsan_is_enabled()
326 delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ? in delay_access()
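Lines 291-326 cover the randomization of the skip counter and the watchpoint delay. The comment at line 291 refers to a small per-CPU linear congruential generator whose result is bounded to [0, ep_ro). A minimal sketch of that idea; the LCG constants are the common "Numerical Recipes" pair, and the default skip value used below is an assumption:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rand_state = 12345;   /* per-CPU in the kernel; one state here */

    /* Pseudo-random number in [0, ep_ro): advance the LCG, bound the result. */
    static uint32_t prandom_u32_max(uint32_t ep_ro)
    {
            rand_state = 1664525u * rand_state + 1013904223u;
            return rand_state % ep_ro;
    }

    int main(void)
    {
            const uint32_t kcsan_skip_watch = 4000;   /* assumed default skip count */

            /* Randomized skip count, as at line 306: lands in (0, kcsan_skip_watch]. */
            long skip_count = kcsan_skip_watch - prandom_u32_max(kcsan_skip_watch);
            printf("skip_count = %ld\n", skip_count);
            return 0;
    }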
334 * detection is currently done for accesses up to a size of 8 bytes.
350 task->kcsan_save_irqtrace = task->irqtrace; in kcsan_save_irqtrace()
357 task->irqtrace = task->kcsan_save_irqtrace; in kcsan_restore_irqtrace()
364 return current->kcsan_stack_depth; in get_kcsan_stack_depth()
374 current->kcsan_stack_depth += val; in add_kcsan_stack_depth()
383 return ctx->disable_scoped ? NULL : &ctx->reorder_access; in get_reorder_access()
402 return reorder_access->ptr == ptr && reorder_access->size == size && in find_reorder_access()
403 reorder_access->type == type && reorder_access->ip == ip; in find_reorder_access()
420 ctx->disable_scoped++; in set_reorder_access()
422 reorder_access->ptr = ptr; in set_reorder_access()
423 reorder_access->size = size; in set_reorder_access()
424 reorder_access->type = type | KCSAN_ACCESS_SCOPED; in set_reorder_access()
425 reorder_access->ip = ip; in set_reorder_access()
426 reorder_access->stack_depth = get_kcsan_stack_depth(); in set_reorder_access()
428 ctx->disable_scoped--; in set_reorder_access()
432 * Pull everything together: check_access() below contains the performance
433 * critical operations; the fast-path (including check_access) functions should
436 * The slow-path (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
437 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
457 * We know a watchpoint exists. Let's try to keep the race-window in kcsan_found_watchpoint()
459 * possible -- avoid unnecessarily complex code until consumed. in kcsan_found_watchpoint()
466 * The access_mask check relies on value-change comparison. To avoid in kcsan_found_watchpoint()
467 * reporting a race where e.g. the writer set up the watchpoint, but the in kcsan_found_watchpoint()
472 if (ctx->access_mask && !find_reorder_access(ctx, ptr, size, type, ip)) in kcsan_found_watchpoint()
496 kcsan_report_set_info(ptr, size, type, ip, watchpoint - watchpoints); in kcsan_found_watchpoint()
526 unsigned long access_mask = ctx->access_mask; in kcsan_setup_watchpoint()
531 * Always reset kcsan_skip counter in slow-path to avoid underflow; see in kcsan_setup_watchpoint()
540 * Check to-ignore addresses after kcsan_is_enabled(), as we may access in kcsan_setup_watchpoint()
564 * Assume setting up a watchpoint for a non-scoped (normal) access that in kcsan_setup_watchpoint()
569 ctx->disable_scoped++; in kcsan_setup_watchpoint()
596 * was modified via a non-instrumented access, e.g. from a device. in kcsan_setup_watchpoint()
607 * Re-read value, and check if it is as expected; if not, we infer a in kcsan_setup_watchpoint()
630 * non-zero diff); if it is to be ignored, the below rules for in kcsan_setup_watchpoint()
646 * value-change, as it is likely that races on in kcsan_setup_watchpoint()
651 /* Always assume a value-change. */ in kcsan_setup_watchpoint()
668 value_change, watchpoint - watchpoints, in kcsan_setup_watchpoint()
694 ctx->disable_scoped--; in kcsan_setup_watchpoint()
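Lines 526-694 form the core of kcsan_setup_watchpoint(): snapshot the value, stall with the watchpoint armed, re-read, and infer a race either from a consumed watchpoint or from a value change, optionally filtered by access_mask (line 526). A compile-and-run sketch of just the compare step, assuming sizes up to 8 bytes and ignoring the stall and the watchpoint machinery:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Report whether the value behind ptr changed across the (omitted) stall.
     * With a non-zero access_mask, only bits under the mask count. size <= 8. */
    static bool value_changed(const volatile void *ptr, size_t size,
                              uint64_t access_mask)
    {
            uint64_t before = 0, after = 0;

            memcpy(&before, (const void *)ptr, size);
            /* ... the real code stalls here (delay_access) with a watchpoint armed ... */
            memcpy(&after, (const void *)ptr, size);

            uint64_t diff = before ^ after;
            if (access_mask)
                    diff &= access_mask;
            return diff != 0;
    }

    int main(void)
    {
            uint32_t flags = 0x0f;

            /* Nothing runs concurrently here, so no change is observed. */
            printf("%d\n", value_changed(&flags, sizeof(flags), 0));
            return 0;
    }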
722 * Avoid user_access_save in fast-path: find_watchpoint is safe without in check_access()
731 * slow-path, as long as no state changes that cause a race to be in check_access()
739 struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */ in check_access()
754 ptr = reorder_access->ptr; in check_access()
755 type = reorder_access->type; in check_access()
756 ip = reorder_access->ip; in check_access()
766 size = READ_ONCE(reorder_access->size); in check_access()
776 if (unlikely(ctx->scoped_accesses.prev)) in check_access()
805 pr_warn("non-strict mode configured - use CONFIG_KCSAN_STRICT=y to see all data races\n"); in kcsan_init()
815 ++get_ctx()->disable_count; in kcsan_disable_current()
821 if (get_ctx()->disable_count-- == 0) { in kcsan_enable_current()
837 if (get_ctx()->disable_count-- == 0) in kcsan_enable_current_nowarn()
851 ++get_ctx()->atomic_nest_count; in kcsan_nestable_atomic_begin()
857 if (get_ctx()->atomic_nest_count-- == 0) { in kcsan_nestable_atomic_end()
873 get_ctx()->in_flat_atomic = true; in kcsan_flat_atomic_begin()
879 get_ctx()->in_flat_atomic = false; in kcsan_flat_atomic_end()
885 get_ctx()->atomic_next = n; in kcsan_atomic_next()
891 get_ctx()->access_mask = mask; in kcsan_set_access_mask()
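Lines 815-891 list the user-facing context toggles: disable/enable, nestable and flat atomic regions, atomic_next, and the access mask. A kernel-context usage sketch (not standalone-compilable; struct my_obj and its fields are hypothetical) of how these are typically reached via <linux/kcsan-checks.h>:

    #include <linux/bits.h>
    #include <linux/kcsan-checks.h>

    struct my_obj {                         /* hypothetical example structure */
            unsigned long flags;
            int counter;
    };

    static void example(struct my_obj *obj)
    {
            /* Treat the next access as if it were marked/atomic (line 885). */
            kcsan_atomic_next(1);
            obj->counter++;

            /* Only bits 0-3 must be free of concurrent writers;
             * ASSERT_EXCLUSIVE_BITS() builds on kcsan_set_access_mask() (line 891). */
            ASSERT_EXCLUSIVE_BITS(obj->flags, GENMASK(3, 0));
    }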
903 ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */ in kcsan_begin_scoped_access()
905 INIT_LIST_HEAD(&sa->list); in kcsan_begin_scoped_access()
906 sa->ptr = ptr; in kcsan_begin_scoped_access()
907 sa->size = size; in kcsan_begin_scoped_access()
908 sa->type = type; in kcsan_begin_scoped_access()
909 sa->ip = _RET_IP_; in kcsan_begin_scoped_access()
911 if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */ in kcsan_begin_scoped_access()
912 INIT_LIST_HEAD(&ctx->scoped_accesses); in kcsan_begin_scoped_access()
913 list_add(&sa->list, &ctx->scoped_accesses); in kcsan_begin_scoped_access()
915 ctx->disable_count--; in kcsan_begin_scoped_access()
924 if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__)) in kcsan_end_scoped_access()
927 ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */ in kcsan_end_scoped_access()
929 list_del(&sa->list); in kcsan_end_scoped_access()
930 if (list_empty(&ctx->scoped_accesses)) in kcsan_end_scoped_access()
933 * slow-path if unnecessary, and avoids requiring list_empty() in kcsan_end_scoped_access()
934 * in the fast-path (to avoid a READ_ONCE() and potential in kcsan_end_scoped_access()
937 ctx->scoped_accesses.prev = NULL; in kcsan_end_scoped_access()
939 ctx->disable_count--; in kcsan_end_scoped_access()
941 check_access(sa->ptr, sa->size, sa->type, sa->ip); in kcsan_end_scoped_access()
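Lines 903-941 maintain the per-context scoped-access list behind the ASSERT_EXCLUSIVE_*_SCOPED() helpers: each entry is re-checked from the slow path of every subsequent access in the same context, and once more when the scope ends (line 941). A kernel-context sketch of typical usage (the function and variable are hypothetical; not standalone-compilable):

    #include <linux/kcsan-checks.h>

    static int shared_state;                /* hypothetical shared variable */

    static void consumer(void)
    {
            /* Assert that no other context writes shared_state for the whole
             * scope; KCSAN keeps re-checking it until the scope is left. */
            ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_state);

            int snapshot = shared_state;    /* accesses here re-trigger the check */
            (void)snapshot;
    }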
958 sa->size = 0; \
963 DEFINE_MEMORY_BARRIER(wmb, sa->type & (KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND));
964 DEFINE_MEMORY_BARRIER(rmb, !(sa->type & KCSAN_ACCESS_WRITE) || (sa->type & KCSAN_ACCESS_COMPOUND));
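Lines 958-964 instantiate the barrier instrumentation: a barrier discards the pending reorder_access when its type matches the barrier's predicate, which is how the weak-memory modeling decides that a plain access can no longer be simulated as delayed past that barrier. A kernel-context sketch of the pattern this models (data/ready are hypothetical; not standalone-compilable):

    static int data;                        /* hypothetical payload */
    static int ready;                       /* hypothetical flag    */

    static void producer(void)
    {
            data = 42;                      /* plain write: becomes the pending reorder_access */
            smp_wmb();                      /* __kcsan_wmb() drops the pending write, so it is
                                             * no longer simulated as reordered past this point */
            WRITE_ONCE(ready, 1);
    }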
1033 * still used in various concurrent context, whether in low-level
1038 * the size-check of compiletime_assert_rwonce_type().
1111 if (get_kcsan_stack_depth() <= reorder_access->stack_depth) { in __tsan_func_exit()
1116 * race due to the write giving up a critical section would only be caught if in __tsan_func_exit()
1119 check_access(reorder_access->ptr, reorder_access->size, in __tsan_func_exit()
1120 reorder_access->type, reorder_access->ip); in __tsan_func_exit()
1121 reorder_access->size = 0; in __tsan_func_exit()
1122 reorder_access->stack_depth = INT_MIN; in __tsan_func_exit()
1125 add_kcsan_stack_depth(-1); in __tsan_func_exit()
1145 * atomic-instrumented) is no longer necessary.
1201 * T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
1203 * T1: if (__atomic_load_n(&p->flag, ...)) {
1205 * p->flag = 0;
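The comment at lines 1201-1205 gives the canonical mixed-access example: T0 updates the flag with an atomic compare-exchange while T1 pairs an atomic load with a plain store. The userspace sketch below reproduces that shape with the same __atomic builtins so the pattern is concrete; KCSAN itself only instruments kernel code, so this is illustration only.

    #include <pthread.h>
    #include <stdio.h>

    static int flag;

    static void *t0(void *arg)
    {
            int old = 0;

            (void)arg;
            __atomic_compare_exchange_n(&flag, &old, 1, 0,
                                        __ATOMIC_RELAXED, __ATOMIC_RELAXED);
            return NULL;
    }

    static void *t1(void *arg)
    {
            (void)arg;
            if (__atomic_load_n(&flag, __ATOMIC_RELAXED))
                    flag = 0;       /* plain (unmarked) write -> potential data race */
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            pthread_create(&a, NULL, t0, NULL);
            pthread_create(&b, NULL, t1, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            printf("flag=%d\n", __atomic_load_n(&flag, __ATOMIC_RELAXED));
            return 0;
    }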
1255 DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0); \