1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * KCSAN core runtime.
4 *
5 * Copyright (C) 2019, Google LLC.
6 */
7
8 #define pr_fmt(fmt) "kcsan: " fmt
9
10 #include <linux/atomic.h>
11 #include <linux/bug.h>
12 #include <linux/delay.h>
13 #include <linux/export.h>
14 #include <linux/init.h>
15 #include <linux/kernel.h>
16 #include <linux/list.h>
17 #include <linux/moduleparam.h>
18 #include <linux/percpu.h>
19 #include <linux/preempt.h>
20 #include <linux/sched.h>
21 #include <linux/uaccess.h>
22
23 #include "encoding.h"
24 #include "kcsan.h"
25 #include "permissive.h"
26
27 static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
28 unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
29 unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
30 static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
31 static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);
32
33 #ifdef MODULE_PARAM_PREFIX
34 #undef MODULE_PARAM_PREFIX
35 #endif
36 #define MODULE_PARAM_PREFIX "kcsan."
37 module_param_named(early_enable, kcsan_early_enable, bool, 0);
38 module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
39 module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
40 module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
41 module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);
42
43 bool kcsan_enabled;
44
45 /* Per-CPU kcsan_ctx for interrupts */
46 static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
47 .disable_count = 0,
48 .atomic_next = 0,
49 .atomic_nest_count = 0,
50 .in_flat_atomic = false,
51 .access_mask = 0,
52 .scoped_accesses = {LIST_POISON1, NULL},
53 };
54
55 /*
56 * Helper macros to index into adjacent slots, starting from the address slot
57 * itself, followed by the right and left slots.
58 *
59 * The purpose is 2-fold:
60 *
61 * 1. if during insertion the address slot is already occupied, check if
62 * any adjacent slots are free;
63 * 2. accesses that straddle a slot boundary due to size that exceeds a
64 * slot's range may check adjacent slots if any watchpoint matches.
65 *
66 * Note that accesses with very large size may still miss a watchpoint; however,
67 * given this should be rare, this is a reasonable trade-off to make, since this
68 * will avoid:
69 *
70 * 1. excessive contention between watchpoint checks and setup;
71 * 2. larger number of simultaneous watchpoints without sacrificing
72 * performance.
73 *
74 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
75 *
76 * slot=0: [ 1, 2, 0]
77 * slot=9: [10, 11, 9]
78 * slot=63: [64, 65, 63]
79 */
80 #define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
81
82 /*
83 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
84 * slot (middle) is fine if we assume that races occur rarely. The set of
85 * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
86 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
87 */
88 #define SLOT_IDX_FAST(slot, i) (slot + i)
89
90 /*
91 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
92 * able to safely update and access a watchpoint without introducing locking
93 * overhead, we encode each watchpoint as a single atomic long. The initial
94 * zero-initialized state matches INVALID_WATCHPOINT.
95 *
96 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
97 * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
98 */
99 static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
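/*
 * Rough worked example for the sizing above (a sketch assuming the default
 * configuration of CONFIG_KCSAN_NUM_WATCHPOINTS=64 and KCSAN_CHECK_ADJACENT=1,
 * i.e. NUM_SLOTS == 1 + 2*KCSAN_CHECK_ADJACENT == 3): the array then holds
 * 64 + 2 == 66 entries, and an access hashing to slot 63 may probe array
 * indices 64, 65 and 63, matching the SLOT_IDX example further above.
 */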
100
101 /*
102 * Counter of instructions to skip before setting up another watchpoint; used
103 * in should_watch(). We use a per-CPU counter to avoid excessive contention.
104 */
105 static DEFINE_PER_CPU(long, kcsan_skip);
106
107 /* For kcsan_prandom_u32_max(). */
108 static DEFINE_PER_CPU(u32, kcsan_rand_state);
109
110 static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
111 size_t size,
112 bool expect_write,
113 long *encoded_watchpoint)
114 {
115 const int slot = watchpoint_slot(addr);
116 const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
117 atomic_long_t *watchpoint;
118 unsigned long wp_addr_masked;
119 size_t wp_size;
120 bool is_write;
121 int i;
122
123 BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);
124
125 for (i = 0; i < NUM_SLOTS; ++i) {
126 watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
127 *encoded_watchpoint = atomic_long_read(watchpoint);
128 if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
129 &wp_size, &is_write))
130 continue;
131
132 if (expect_write && !is_write)
133 continue;
134
135 /* Check if the watchpoint matches the access. */
136 if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
137 return watchpoint;
138 }
139
140 return NULL;
141 }
142
143 static inline atomic_long_t *
144 insert_watchpoint(unsigned long addr, size_t size, bool is_write)
145 {
146 const int slot = watchpoint_slot(addr);
147 const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
148 atomic_long_t *watchpoint;
149 int i;
150
151 /* Check slot index logic, ensuring we stay within array bounds. */
152 BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
153 BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
154 BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
155 BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);
156
157 for (i = 0; i < NUM_SLOTS; ++i) {
158 long expect_val = INVALID_WATCHPOINT;
159
160 /* Try to acquire this slot. */
161 watchpoint = &watchpoints[SLOT_IDX(slot, i)];
162 if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
163 return watchpoint;
164 }
165
166 return NULL;
167 }
168
169 /*
170 * Return true if watchpoint was successfully consumed, false otherwise.
171 *
172 * This may return false if:
173 *
174 * 1. another thread already consumed the watchpoint;
175 * 2. the thread that set up the watchpoint already removed it;
176 * 3. the watchpoint was removed and then re-used.
177 */
178 static __always_inline bool
179 try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
180 {
181 return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
182 }
183
184 /* Return true if watchpoint was not touched, false if already consumed. */
185 static inline bool consume_watchpoint(atomic_long_t *watchpoint)
186 {
187 return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
188 }
189
190 /* Remove the watchpoint -- its slot may be reused after. */
191 static inline void remove_watchpoint(atomic_long_t *watchpoint)
192 {
193 atomic_long_set(watchpoint, INVALID_WATCHPOINT);
194 }
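/*
 * Summarizing the helpers above, each watchpoint slot cycles through:
 * INVALID_WATCHPOINT (free) -> encoded watchpoint (armed by
 * insert_watchpoint()) -> CONSUMED_WATCHPOINT (claimed either by a racing
 * thread via try_consume_watchpoint(), or by the owner via
 * consume_watchpoint()) -> INVALID_WATCHPOINT again (remove_watchpoint()).
 */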
195
196 static __always_inline struct kcsan_ctx *get_ctx(void)
197 {
198 /*
199 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks that would
200 * also result in calls that generate warnings in uaccess regions.
201 */
202 return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
203 }
204
205 /* Check scoped accesses; never inline because this is a slow-path! */
206 static noinline void kcsan_check_scoped_accesses(void)
207 {
208 struct kcsan_ctx *ctx = get_ctx();
209 struct list_head *prev_save = ctx->scoped_accesses.prev;
210 struct kcsan_scoped_access *scoped_access;
211
212 ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */
213 list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
214 __kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
215 ctx->scoped_accesses.prev = prev_save;
216 }
217
218 /* Rules for generic atomic accesses. Called from fast-path. */
219 static __always_inline bool
220 is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
221 {
222 if (type & KCSAN_ACCESS_ATOMIC)
223 return true;
224
225 /*
226 * Unless explicitly declared atomic, never consider an assertion access
227 * as atomic. This allows using them also in atomic regions, such as
228 * seqlocks, without implicitly changing their semantics.
229 */
230 if (type & KCSAN_ACCESS_ASSERT)
231 return false;
232
233 if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
234 (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
235 !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
236 return true; /* Assume aligned writes up to word size are atomic. */
237
238 if (ctx->atomic_next > 0) {
239 /*
240 * Because we do not have separate contexts for nested
241 * interrupts, in case atomic_next is set, we simply assume that
242 * the outer interrupt set atomic_next. In the worst case, we
243 * will conservatively consider operations as atomic. This is a
244 * reasonable trade-off to make, since this case should be
245 * extremely rare; however, even if extremely rare, it could
246 * lead to false positives otherwise.
247 */
248 if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
249 --ctx->atomic_next; /* in task, or outer interrupt */
250 return true;
251 }
252
253 return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
254 }
255
256 static __always_inline bool
257 should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
258 {
259 /*
260 * Never set up watchpoints when memory operations are atomic.
261 *
262 * Need to check this first, before the kcsan_skip check below: (1) atomics
263 * should not count towards skipped instructions, and (2) we need to actually
264 * decrement kcsan_atomic_next for the consecutive instruction stream.
265 */
266 if (is_atomic(ptr, size, type, ctx))
267 return false;
268
269 if (this_cpu_dec_return(kcsan_skip) >= 0)
270 return false;
271
272 /*
273 * NOTE: If we get here, kcsan_skip must always be reset in slow path
274 * via reset_kcsan_skip() to avoid underflow.
275 */
276
277 /* this operation should be watched */
278 return true;
279 }
280
281 /*
282 * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
283 * congruential generator, using constants from "Numerical Recipes".
284 */
285 static u32 kcsan_prandom_u32_max(u32 ep_ro)
286 {
287 u32 state = this_cpu_read(kcsan_rand_state);
288
289 state = 1664525 * state + 1013904223;
290 this_cpu_write(kcsan_rand_state, state);
291
292 return state % ep_ro;
293 }
294
295 static inline void reset_kcsan_skip(void)
296 {
297 long skip_count = kcsan_skip_watch -
298 (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
299 kcsan_prandom_u32_max(kcsan_skip_watch) :
300 0);
301 this_cpu_write(kcsan_skip, skip_count);
302 }
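/*
 * With the above, the per-CPU skip counter restarts at kcsan_skip_watch, or,
 * with CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE=y, at a value drawn from
 * (0, kcsan_skip_watch]; should_watch() returns true (and a watchpoint is set
 * up) only once the counter has been decremented below zero.
 */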
303
304 static __always_inline bool kcsan_is_enabled(struct kcsan_ctx *ctx)
305 {
306 return READ_ONCE(kcsan_enabled) && !ctx->disable_count;
307 }
308
309 /* Introduce delay depending on context and configuration. */
310 static void delay_access(int type)
311 {
312 unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
313 /* For certain access types, skew the random delay to be longer. */
314 unsigned int skew_delay_order =
315 (type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;
316
317 delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
318 kcsan_prandom_u32_max(delay >> skew_delay_order) :
319 0;
320 udelay(delay);
321 }
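/*
 * For instance, assuming the default CONFIG_KCSAN_UDELAY_TASK=80 with
 * CONFIG_KCSAN_DELAY_RANDOMIZE=y, a plain access in task context stalls for up
 * to 80 microseconds, whereas compound and ASSERT accesses only subtract a
 * random value from the smaller range [0, delay/2), skewing their delays to be
 * longer on average.
 */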
322
323 void kcsan_save_irqtrace(struct task_struct *task)
324 {
325 #ifdef CONFIG_TRACE_IRQFLAGS
326 task->kcsan_save_irqtrace = task->irqtrace;
327 #endif
328 }
329
330 void kcsan_restore_irqtrace(struct task_struct *task)
331 {
332 #ifdef CONFIG_TRACE_IRQFLAGS
333 task->irqtrace = task->kcsan_save_irqtrace;
334 #endif
335 }
336
337 /*
338 * Pull everything together: check_access() below contains the performance
339 * critical operations; the fast-path (including check_access) functions should
340 * all be inlinable by the instrumentation functions.
341 *
342 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
343 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
344 * be filtered from the stacktrace, as well as give them unique names for the
345 * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
346 * since, while they do not access any user memory themselves, instrumentation
347 * is still emitted in UACCESS regions.
348 */
349
350 static noinline void kcsan_found_watchpoint(const volatile void *ptr,
351 size_t size,
352 int type,
353 atomic_long_t *watchpoint,
354 long encoded_watchpoint)
355 {
356 const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
357 struct kcsan_ctx *ctx = get_ctx();
358 unsigned long flags;
359 bool consumed;
360
361 /*
362 * We know a watchpoint exists. Let's try to keep the race-window
363 * between here and finally consuming the watchpoint below as small as
364 * possible -- avoid unnecessarily complex code until consumed.
365 */
366
367 if (!kcsan_is_enabled(ctx))
368 return;
369
370 /*
371 * The access_mask check relies on value-change comparison. To avoid
372 * reporting a race where e.g. the writer set up the watchpoint, but the
373 * reader has access_mask!=0, we have to ignore the found watchpoint.
374 */
375 if (ctx->access_mask)
376 return;
377
378 /*
379 * If the other thread does not want to ignore the access, and there was
380 * a value change as a result of this thread's operation, we will still
381 * generate a report of unknown origin.
382 *
383 * Use CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n to filter.
384 */
385 if (!is_assert && kcsan_ignore_address(ptr))
386 return;
387
388 /*
389 * Consuming the watchpoint must be guarded by kcsan_is_enabled() to
390 * avoid erroneously triggering reports if the context is disabled.
391 */
392 consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);
393
394 /* keep this after try_consume_watchpoint */
395 flags = user_access_save();
396
397 if (consumed) {
398 kcsan_save_irqtrace(current);
399 kcsan_report_set_info(ptr, size, type, watchpoint - watchpoints);
400 kcsan_restore_irqtrace(current);
401 } else {
402 /*
403 * The other thread may not print any diagnostics, as it has
404 * already removed the watchpoint, or another thread consumed
405 * the watchpoint before this thread.
406 */
407 atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
408 }
409
410 if (is_assert)
411 atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
412 else
413 atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);
414
415 user_access_restore(flags);
416 }
417
418 static noinline void
419 kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
420 {
421 const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
422 const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
423 atomic_long_t *watchpoint;
424 u64 old, new, diff;
425 unsigned long access_mask;
426 enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
427 unsigned long ua_flags = user_access_save();
428 struct kcsan_ctx *ctx = get_ctx();
429 unsigned long irq_flags = 0;
430
431 /*
432 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
433 * should_watch().
434 */
435 reset_kcsan_skip();
436
437 if (!kcsan_is_enabled(ctx))
438 goto out;
439
440 /*
441 * Check to-ignore addresses after kcsan_is_enabled(), as we may access
442 * memory that is not yet initialized during early boot.
443 */
444 if (!is_assert && kcsan_ignore_address(ptr))
445 goto out;
446
447 if (!check_encodable((unsigned long)ptr, size)) {
448 atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
449 goto out;
450 }
451
452 /*
453 * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
454 * runtime is entered for every memory access, and potentially useful
455 * information is lost if dirtied by KCSAN.
456 */
457 kcsan_save_irqtrace(current);
458 if (!kcsan_interrupt_watcher)
459 local_irq_save(irq_flags);
460
461 watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
462 if (watchpoint == NULL) {
463 /*
464 * Out of capacity: the size of 'watchpoints', and the frequency
465 * with which should_watch() returns true should be tweaked so
466 * that this case happens very rarely.
467 */
468 atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
469 goto out_unlock;
470 }
471
472 atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
473 atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
474
475 /*
476 * Read the current value, to later check and infer a race if the data
477 * was modified via a non-instrumented access, e.g. from a device.
478 */
479 old = 0;
480 switch (size) {
481 case 1:
482 old = READ_ONCE(*(const u8 *)ptr);
483 break;
484 case 2:
485 old = READ_ONCE(*(const u16 *)ptr);
486 break;
487 case 4:
488 old = READ_ONCE(*(const u32 *)ptr);
489 break;
490 case 8:
491 old = READ_ONCE(*(const u64 *)ptr);
492 break;
493 default:
494 break; /* ignore; we do not diff the values */
495 }
496
497 /*
498 * Delay this thread, to increase probability of observing a racy
499 * conflicting access.
500 */
501 delay_access(type);
502
503 /*
504 * Re-read value, and check if it is as expected; if not, we infer a
505 * racy access.
506 */
507 access_mask = ctx->access_mask;
508 new = 0;
509 switch (size) {
510 case 1:
511 new = READ_ONCE(*(const u8 *)ptr);
512 break;
513 case 2:
514 new = READ_ONCE(*(const u16 *)ptr);
515 break;
516 case 4:
517 new = READ_ONCE(*(const u32 *)ptr);
518 break;
519 case 8:
520 new = READ_ONCE(*(const u64 *)ptr);
521 break;
522 default:
523 break; /* ignore; we do not diff the values */
524 }
525
526 diff = old ^ new;
527 if (access_mask)
528 diff &= access_mask;
529
530 /*
531 * Check if we observed a value change.
532 *
533 * Also check if the data race should be ignored (the rules depend on
534 * non-zero diff); if it is to be ignored, the below rules for
535 * KCSAN_VALUE_CHANGE_MAYBE apply.
536 */
537 if (diff && !kcsan_ignore_data_race(size, type, old, new, diff))
538 value_change = KCSAN_VALUE_CHANGE_TRUE;
539
540 /* Check if this access raced with another. */
541 if (!consume_watchpoint(watchpoint)) {
542 /*
543 * Depending on the access type, map a value_change of MAYBE to
544 * TRUE (always report) or FALSE (never report).
545 */
546 if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
547 if (access_mask != 0) {
548 /*
549 * For access with access_mask, we require a
550 * value-change, as it is likely that races on
551 * ~access_mask bits are expected.
552 */
553 value_change = KCSAN_VALUE_CHANGE_FALSE;
554 } else if (size > 8 || is_assert) {
555 /* Always assume a value-change. */
556 value_change = KCSAN_VALUE_CHANGE_TRUE;
557 }
558 }
559
560 /*
561 * No need to increment 'data_races' counter, as the racing
562 * thread already did.
563 *
564 * Count 'assert_failures' for each failed ASSERT access,
565 * therefore both this thread and the racing thread may
566 * increment this counter.
567 */
568 if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
569 atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
570
571 kcsan_report_known_origin(ptr, size, type, value_change,
572 watchpoint - watchpoints,
573 old, new, access_mask);
574 } else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
575 /* Inferring a race, since the value should not have changed. */
576
577 atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
578 if (is_assert)
579 atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
580
581 if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
582 kcsan_report_unknown_origin(ptr, size, type, old, new, access_mask);
583 }
584
585 /*
586 * Remove watchpoint; must be after reporting, since the slot may be
587 * reused after this point.
588 */
589 remove_watchpoint(watchpoint);
590 atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
591 out_unlock:
592 if (!kcsan_interrupt_watcher)
593 local_irq_restore(irq_flags);
594 kcsan_restore_irqtrace(current);
595 out:
596 user_access_restore(ua_flags);
597 }
598
599 static __always_inline void check_access(const volatile void *ptr, size_t size,
600 int type)
601 {
602 const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
603 atomic_long_t *watchpoint;
604 long encoded_watchpoint;
605
606 /*
607 * Do nothing for 0-sized checks; this comparison will be optimized out
608 * for constant-sized instrumentation (__tsan_{read,write}N).
609 */
610 if (unlikely(size == 0))
611 return;
612
613 /*
614 * Avoid user_access_save in fast-path: find_watchpoint is safe without
615 * user_access_save, as the address that ptr points to is only used to
616 * check if a watchpoint exists; ptr is never dereferenced.
617 */
618 watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
619 &encoded_watchpoint);
620 /*
621 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
622 * slow-path, as long as no state changes that cause a race to be
623 * detected and reported have occurred until kcsan_is_enabled() is
624 * checked.
625 */
626
627 if (unlikely(watchpoint != NULL))
628 kcsan_found_watchpoint(ptr, size, type, watchpoint,
629 encoded_watchpoint);
630 else {
631 struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */
632
633 if (unlikely(should_watch(ptr, size, type, ctx)))
634 kcsan_setup_watchpoint(ptr, size, type);
635 else if (unlikely(ctx->scoped_accesses.prev))
636 kcsan_check_scoped_accesses();
637 }
638 }
639
640 /* === Public interface ===================================================== */
641
642 void __init kcsan_init(void)
643 {
644 int cpu;
645
646 BUG_ON(!in_task());
647
648 for_each_possible_cpu(cpu)
649 per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
650
651 /*
652 * We are in the init task, and no other tasks should be running;
653 * WRITE_ONCE without memory barrier is sufficient.
654 */
655 if (kcsan_early_enable) {
656 pr_info("enabled early\n");
657 WRITE_ONCE(kcsan_enabled, true);
658 }
659
660 if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) ||
661 IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) ||
662 IS_ENABLED(CONFIG_KCSAN_PERMISSIVE) ||
663 IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
664 pr_warn("non-strict mode configured - use CONFIG_KCSAN_STRICT=y to see all data races\n");
665 } else {
666 pr_info("strict mode configured\n");
667 }
668 }
669
670 /* === Exported interface =================================================== */
671
672 void kcsan_disable_current(void)
673 {
674 ++get_ctx()->disable_count;
675 }
676 EXPORT_SYMBOL(kcsan_disable_current);
677
678 void kcsan_enable_current(void)
679 {
680 if (get_ctx()->disable_count-- == 0) {
681 /*
682 * Warn if kcsan_enable_current() calls are unbalanced with
683 * kcsan_disable_current() calls, which causes disable_count to
684 * become negative and should not happen.
685 */
686 kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
687 kcsan_disable_current(); /* disable to generate warning */
688 WARN(1, "Unbalanced %s()", __func__);
689 kcsan_enable_current();
690 }
691 }
692 EXPORT_SYMBOL(kcsan_enable_current);
693
694 void kcsan_enable_current_nowarn(void)
695 {
696 if (get_ctx()->disable_count-- == 0)
697 kcsan_disable_current();
698 }
699 EXPORT_SYMBOL(kcsan_enable_current_nowarn);
700
701 void kcsan_nestable_atomic_begin(void)
702 {
703 /*
704 * Do *not* check and warn if we are in a flat atomic region: nestable
705 * and flat atomic regions are independent of each other.
706 * See include/linux/kcsan.h: struct kcsan_ctx comments for more
707 * details.
708 */
709
710 ++get_ctx()->atomic_nest_count;
711 }
712 EXPORT_SYMBOL(kcsan_nestable_atomic_begin);
713
714 void kcsan_nestable_atomic_end(void)
715 {
716 if (get_ctx()->atomic_nest_count-- == 0) {
717 /*
718 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
719 * kcsan_nestable_atomic_begin() calls, which causes
720 * atomic_nest_count to become negative and should not happen.
721 */
722 kcsan_nestable_atomic_begin(); /* restore to 0 */
723 kcsan_disable_current(); /* disable to generate warning */
724 WARN(1, "Unbalanced %s()", __func__);
725 kcsan_enable_current();
726 }
727 }
728 EXPORT_SYMBOL(kcsan_nestable_atomic_end);
729
730 void kcsan_flat_atomic_begin(void)
731 {
732 get_ctx()->in_flat_atomic = true;
733 }
734 EXPORT_SYMBOL(kcsan_flat_atomic_begin);
735
736 void kcsan_flat_atomic_end(void)
737 {
738 get_ctx()->in_flat_atomic = false;
739 }
740 EXPORT_SYMBOL(kcsan_flat_atomic_end);
741
742 void kcsan_atomic_next(int n)
743 {
744 get_ctx()->atomic_next = n;
745 }
746 EXPORT_SYMBOL(kcsan_atomic_next);
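/*
 * Hedged usage sketch (shared_a and shared_b are hypothetical variables used
 * purely for illustration): a caller that knows its next N plain accesses are
 * protected by some other mechanism may write
 *
 *	kcsan_atomic_next(2);
 *	x = shared_a;	// treated as atomic by is_atomic()
 *	y = shared_b;	// treated as atomic by is_atomic()
 *
 * Note the count is absolute, not cumulative: a later call simply overwrites
 * whatever remains of the previous count.
 */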
747
748 void kcsan_set_access_mask(unsigned long mask)
749 {
750 get_ctx()->access_mask = mask;
751 }
752 EXPORT_SYMBOL(kcsan_set_access_mask);
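/*
 * Sketch of how the mask is consumed (derived from kcsan_found_watchpoint()
 * and kcsan_setup_watchpoint() above; the mask value is just an example):
 * after kcsan_set_access_mask(0x000000ff), only value changes in the masked
 * low byte are reported for subsequent checked accesses, and watchpoints set
 * up by other threads are ignored. Callers are expected to reset the mask to
 * 0 when done; interfaces such as ASSERT_EXCLUSIVE_BITS() build on this.
 */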
753
754 struct kcsan_scoped_access *
755 kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
756 struct kcsan_scoped_access *sa)
757 {
758 struct kcsan_ctx *ctx = get_ctx();
759
760 __kcsan_check_access(ptr, size, type);
761
762 ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
763
764 INIT_LIST_HEAD(&sa->list);
765 sa->ptr = ptr;
766 sa->size = size;
767 sa->type = type;
768
769 if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
770 INIT_LIST_HEAD(&ctx->scoped_accesses);
771 list_add(&sa->list, &ctx->scoped_accesses);
772
773 ctx->disable_count--;
774 return sa;
775 }
776 EXPORT_SYMBOL(kcsan_begin_scoped_access);
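/*
 * Minimal usage sketch ('obj' and its 'state' member are hypothetical):
 *
 *	struct kcsan_scoped_access sa;
 *
 *	kcsan_begin_scoped_access(&obj->state, sizeof(obj->state),
 *				  KCSAN_ACCESS_ASSERT, &sa);
 *	...	// every check_access() in this context also re-checks
 *		// obj->state via kcsan_check_scoped_accesses()
 *	kcsan_end_scoped_access(&sa);
 *
 * The ASSERT_EXCLUSIVE_*_SCOPED() macros build on this interface.
 */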
777
778 void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
779 {
780 struct kcsan_ctx *ctx = get_ctx();
781
782 if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
783 return;
784
785 ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
786
787 list_del(&sa->list);
788 if (list_empty(&ctx->scoped_accesses))
789 /*
790 * This ensures we do not enter the kcsan_check_scoped_accesses()
791 * slow-path if unnecessary, and avoids requiring list_empty()
792 * in the fast-path (to avoid a READ_ONCE() and potential
793 * uaccess warning).
794 */
795 ctx->scoped_accesses.prev = NULL;
796
797 ctx->disable_count--;
798
799 __kcsan_check_access(sa->ptr, sa->size, sa->type);
800 }
801 EXPORT_SYMBOL(kcsan_end_scoped_access);
802
803 void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
804 {
805 check_access(ptr, size, type);
806 }
807 EXPORT_SYMBOL(__kcsan_check_access);
808
809 /*
810 * KCSAN uses the same instrumentation that is emitted by supported compilers
811 * for ThreadSanitizer (TSAN).
812 *
813 * When enabled, the compiler emits instrumentation calls (the functions
814 * prefixed with "__tsan" below) for all loads and stores that it generated;
815 * inline asm is not instrumented.
816 *
817 * Note that not all supported compiler versions distinguish aligned/unaligned
818 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
819 * version to the generic version, which can handle both.
820 */
821
822 #define DEFINE_TSAN_READ_WRITE(size) \
823 void __tsan_read##size(void *ptr); \
824 void __tsan_read##size(void *ptr) \
825 { \
826 check_access(ptr, size, 0); \
827 } \
828 EXPORT_SYMBOL(__tsan_read##size); \
829 void __tsan_unaligned_read##size(void *ptr) \
830 __alias(__tsan_read##size); \
831 EXPORT_SYMBOL(__tsan_unaligned_read##size); \
832 void __tsan_write##size(void *ptr); \
833 void __tsan_write##size(void *ptr) \
834 { \
835 check_access(ptr, size, KCSAN_ACCESS_WRITE); \
836 } \
837 EXPORT_SYMBOL(__tsan_write##size); \
838 void __tsan_unaligned_write##size(void *ptr) \
839 __alias(__tsan_write##size); \
840 EXPORT_SYMBOL(__tsan_unaligned_write##size); \
841 void __tsan_read_write##size(void *ptr); \
842 void __tsan_read_write##size(void *ptr) \
843 { \
844 check_access(ptr, size, \
845 KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE); \
846 } \
847 EXPORT_SYMBOL(__tsan_read_write##size); \
848 void __tsan_unaligned_read_write##size(void *ptr) \
849 __alias(__tsan_read_write##size); \
850 EXPORT_SYMBOL(__tsan_unaligned_read_write##size)
851
852 DEFINE_TSAN_READ_WRITE(1);
853 DEFINE_TSAN_READ_WRITE(2);
854 DEFINE_TSAN_READ_WRITE(4);
855 DEFINE_TSAN_READ_WRITE(8);
856 DEFINE_TSAN_READ_WRITE(16);
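/*
 * To illustrate the instrumentation above (a sketch, not generated code): for
 * a plain 4-byte load such as "val = *p;" the compiler emits a call to
 * __tsan_read4(p) (or __tsan_unaligned_read4(p)) alongside the load, which
 * ends up in check_access(p, 4, 0).
 */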
857
858 void __tsan_read_range(void *ptr, size_t size);
859 void __tsan_read_range(void *ptr, size_t size)
860 {
861 check_access(ptr, size, 0);
862 }
863 EXPORT_SYMBOL(__tsan_read_range);
864
865 void __tsan_write_range(void *ptr, size_t size);
866 void __tsan_write_range(void *ptr, size_t size)
867 {
868 check_access(ptr, size, KCSAN_ACCESS_WRITE);
869 }
870 EXPORT_SYMBOL(__tsan_write_range);
871
872 /*
873 * Use of explicit volatile is generally disallowed [1]; however, volatile is
874 * still used in various concurrent contexts, whether in low-level
875 * synchronization primitives or for legacy reasons.
876 * [1] https://lwn.net/Articles/233479/
877 *
878 * We only consider volatile accesses atomic if they are aligned and would pass
879 * the size-check of compiletime_assert_rwonce_type().
880 */
881 #define DEFINE_TSAN_VOLATILE_READ_WRITE(size) \
882 void __tsan_volatile_read##size(void *ptr); \
883 void __tsan_volatile_read##size(void *ptr) \
884 { \
885 const bool is_atomic = size <= sizeof(long long) && \
886 IS_ALIGNED((unsigned long)ptr, size); \
887 if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
888 return; \
889 check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \
890 } \
891 EXPORT_SYMBOL(__tsan_volatile_read##size); \
892 void __tsan_unaligned_volatile_read##size(void *ptr) \
893 __alias(__tsan_volatile_read##size); \
894 EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size); \
895 void __tsan_volatile_write##size(void *ptr); \
896 void __tsan_volatile_write##size(void *ptr) \
897 { \
898 const bool is_atomic = size <= sizeof(long long) && \
899 IS_ALIGNED((unsigned long)ptr, size); \
900 if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
901 return; \
902 check_access(ptr, size, \
903 KCSAN_ACCESS_WRITE | \
904 (is_atomic ? KCSAN_ACCESS_ATOMIC : 0)); \
905 } \
906 EXPORT_SYMBOL(__tsan_volatile_write##size); \
907 void __tsan_unaligned_volatile_write##size(void *ptr) \
908 __alias(__tsan_volatile_write##size); \
909 EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)
910
911 DEFINE_TSAN_VOLATILE_READ_WRITE(1);
912 DEFINE_TSAN_VOLATILE_READ_WRITE(2);
913 DEFINE_TSAN_VOLATILE_READ_WRITE(4);
914 DEFINE_TSAN_VOLATILE_READ_WRITE(8);
915 DEFINE_TSAN_VOLATILE_READ_WRITE(16);
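/*
 * For example (a sketch): READ_ONCE() of an aligned 4-byte variable is a
 * volatile load, so the compiler emits __tsan_volatile_read4(); the access is
 * then checked with KCSAN_ACCESS_ATOMIC, or skipped entirely with
 * CONFIG_KCSAN_IGNORE_ATOMICS=y.
 */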
916
917 /*
918 * The below are not required by KCSAN, but can still be emitted by the
919 * compiler.
920 */
921 void __tsan_func_entry(void *call_pc);
922 void __tsan_func_entry(void *call_pc)
923 {
924 }
925 EXPORT_SYMBOL(__tsan_func_entry);
926 void __tsan_func_exit(void);
927 void __tsan_func_exit(void)
928 {
929 }
930 EXPORT_SYMBOL(__tsan_func_exit);
931 void __tsan_init(void);
932 void __tsan_init(void)
933 {
934 }
935 EXPORT_SYMBOL(__tsan_init);
936
937 /*
938 * Instrumentation for atomic builtins (__atomic_*, __sync_*).
939 *
940 * Normal kernel code _should not_ be using them directly, but some
941 * architectures may implement some or all atomics using the compilers'
942 * builtins.
943 *
944 * Note: If an architecture decides to fully implement atomics using the
945 * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
946 * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
947 * atomic-instrumented) is no longer necessary.
948 *
949 * TSAN instrumentation replaces atomic accesses with calls to any of the below
950 * functions, whose job is to also execute the operation itself.
951 */
952
953 #define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits) \
954 u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder); \
955 u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \
956 { \
957 if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
958 check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \
959 } \
960 return __atomic_load_n(ptr, memorder); \
961 } \
962 EXPORT_SYMBOL(__tsan_atomic##bits##_load); \
963 void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \
964 void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \
965 { \
966 if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
967 check_access(ptr, bits / BITS_PER_BYTE, \
968 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
969 } \
970 __atomic_store_n(ptr, v, memorder); \
971 } \
972 EXPORT_SYMBOL(__tsan_atomic##bits##_store)
973
974 #define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix) \
975 u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
976 u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
977 { \
978 if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
979 check_access(ptr, bits / BITS_PER_BYTE, \
980 KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
981 KCSAN_ACCESS_ATOMIC); \
982 } \
983 return __atomic_##op##suffix(ptr, v, memorder); \
984 } \
985 EXPORT_SYMBOL(__tsan_atomic##bits##_##op)
986
987 /*
988 * Note: CAS operations are always classified as write, even in case they
989 * fail. We cannot perform check_access() after a write, as it might lead to
990 * false positives, in cases such as:
991 *
992 * T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
993 *
994 * T1: if (__atomic_load_n(&p->flag, ...)) {
995 * modify *p;
996 * p->flag = 0;
997 * }
998 *
999 * The only downside is that, if there are 3 threads, with one CAS that
1000 * succeeds, another CAS that fails, and an unmarked racing operation, we may
1001 * point at the wrong CAS as the source of the race. However, if we assume that
1002 * all CAS can succeed in some other execution, the data race is still valid.
1003 */
1004 #define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak) \
1005 int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
1006 u##bits val, int mo, int fail_mo); \
1007 int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
1008 u##bits val, int mo, int fail_mo) \
1009 { \
1010 if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
1011 check_access(ptr, bits / BITS_PER_BYTE, \
1012 KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
1013 KCSAN_ACCESS_ATOMIC); \
1014 } \
1015 return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
1016 } \
1017 EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)
1018
1019 #define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits) \
1020 u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
1021 int mo, int fail_mo); \
1022 u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
1023 int mo, int fail_mo) \
1024 { \
1025 if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
1026 check_access(ptr, bits / BITS_PER_BYTE, \
1027 KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
1028 KCSAN_ACCESS_ATOMIC); \
1029 } \
1030 __atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo); \
1031 return exp; \
1032 } \
1033 EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)
1034
1035 #define DEFINE_TSAN_ATOMIC_OPS(bits) \
1036 DEFINE_TSAN_ATOMIC_LOAD_STORE(bits); \
1037 DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n); \
1038 DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, ); \
1039 DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, ); \
1040 DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, ); \
1041 DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, ); \
1042 DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, ); \
1043 DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, ); \
1044 DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0); \
1045 DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1); \
1046 DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)
1047
1048 DEFINE_TSAN_ATOMIC_OPS(8);
1049 DEFINE_TSAN_ATOMIC_OPS(16);
1050 DEFINE_TSAN_ATOMIC_OPS(32);
1051 DEFINE_TSAN_ATOMIC_OPS(64);
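/*
 * As an illustration of the expansions above, DEFINE_TSAN_ATOMIC_OPS(32)
 * provides, among others:
 *
 *	u32 __tsan_atomic32_fetch_add(u32 *ptr, u32 v, int memorder)
 *
 * which (unless CONFIG_KCSAN_IGNORE_ATOMICS=y) checks the access as an atomic
 * compound write and then performs __atomic_fetch_add(ptr, v, memorder).
 */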
1052
1053 void __tsan_atomic_thread_fence(int memorder);
1054 void __tsan_atomic_thread_fence(int memorder)
1055 {
1056 __atomic_thread_fence(memorder);
1057 }
1058 EXPORT_SYMBOL(__tsan_atomic_thread_fence);
1059
1060 void __tsan_atomic_signal_fence(int memorder);
1061 void __tsan_atomic_signal_fence(int memorder) { }
1062 EXPORT_SYMBOL(__tsan_atomic_signal_fence);
1063