// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN core runtime.
 *
 * Copyright (C) 2019, Google LLC.
 */

#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include "atomic.h"
#include "encoding.h"
#include "kcsan.h"

static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);

bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	.disable_count		= 0,
	.atomic_next		= 0,
	.atomic_nest_count	= 0,
	.in_flat_atomic		= false,
	.access_mask		= 0,
	.scoped_accesses	= {LIST_POISON1, NULL},
};

/*
 * Helper macros to index into adjacent slots, starting from the address slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 * 1. if during insertion the address slot is already occupied, check if
 *    any adjacent slots are free;
 * 2. accesses that straddle a slot boundary due to size that exceeds a
 *    slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 * 1. excessive contention between watchpoint checks and setup;
 * 2. larger number of simultaneous watchpoints without sacrificing
 *    performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *   slot=0:  [ 1,  2,  0]
 *   slot=9:  [10, 11,  9]
 *   slot=63: [64, 65, 63]
 */
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
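
/*
 * Worked example, expanding SLOT_IDX for KCSAN_CHECK_ADJACENT == 1
 * (NUM_SLOTS == 3) and slot == 9, which yields the order shown above:
 *
 *	SLOT_IDX(9, 0) == 9 + (0 + 1) % 3 == 10  (primary slot; the array
 *						   index is offset by
 *						   KCSAN_CHECK_ADJACENT)
 *	SLOT_IDX(9, 1) == 9 + (1 + 1) % 3 == 11  (right neighbour)
 *	SLOT_IDX(9, 2) == 9 + (2 + 1) % 3 ==  9  (left neighbour)
 */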

/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
 * slot (middle) is fine if we assume that races occur rarely. The set of
 * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)
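
/*
 * For example, for slot == 9 and NUM_SLOTS == 3, SLOT_IDX_FAST yields
 * {9, 10, 11} while SLOT_IDX yields {10, 11, 9}: the same set, except the
 * primary (middle) slot is no longer checked first, saving the modulo.
 */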

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
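
/*
 * Example sizing, assuming the default CONFIG_KCSAN_NUM_WATCHPOINTS == 64 and
 * KCSAN_CHECK_ADJACENT == 1 (NUM_SLOTS == 3): ARRAY_SIZE(watchpoints) == 66,
 * so that e.g. SLOT_IDX_FAST(63, 2) == 65 stays in bounds without wrapping.
 */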

/*
 * Per-CPU counter of instructions to skip before setting up another
 * watchpoint; used in should_watch(). We use a per-CPU counter to avoid
 * excessive contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

/* For kcsan_prandom_u32_max(). */
static DEFINE_PER_CPU(u32, kcsan_rand_state);

static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
						      size_t size,
						      bool expect_write,
						      long *encoded_watchpoint)
{
	const int slot = watchpoint_slot(addr);
	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
	atomic_long_t *watchpoint;
	unsigned long wp_addr_masked;
	size_t wp_size;
	bool is_write;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
		*encoded_watchpoint = atomic_long_read(watchpoint);
		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
				       &wp_size, &is_write))
			continue;

		if (expect_write && !is_write)
			continue;

		/* Check if the watchpoint matches the access. */
		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
			return watchpoint;
	}

	return NULL;
}

static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
	const int slot = watchpoint_slot(addr);
	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
	atomic_long_t *watchpoint;
	int i;

	/* Check slot index logic, ensuring we stay within array bounds. */
	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		long expect_val = INVALID_WATCHPOINT;

		/* Try to acquire this slot. */
		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
			return watchpoint;
	}

	return NULL;
}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 *	1. another thread already consumed the watchpoint;
 *	2. the thread that set up the watchpoint already removed it;
 *	3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/* Return true if watchpoint was not touched, false if already consumed. */
static inline bool consume_watchpoint(atomic_long_t *watchpoint)
{
	return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

/* Remove the watchpoint -- its slot may be reused after. */
static inline void remove_watchpoint(atomic_long_t *watchpoint)
{
	atomic_long_set(watchpoint, INVALID_WATCHPOINT);
}

static __always_inline struct kcsan_ctx *get_ctx(void)
{
	/*
	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, that
	 * would also result in calls that generate warnings in uaccess
	 * regions.
	 */
	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}

/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{
	struct kcsan_ctx *ctx = get_ctx();
	struct list_head *prev_save = ctx->scoped_accesses.prev;
	struct kcsan_scoped_access *scoped_access;

	ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
		__kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
	ctx->scoped_accesses.prev = prev_save;
}

/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
	if (type & KCSAN_ACCESS_ATOMIC)
		return true;

	/*
	 * Unless explicitly declared atomic, never consider an assertion access
	 * as atomic. This allows using them also in atomic regions, such as
	 * seqlocks, without implicitly changing their semantics.
	 */
	if (type & KCSAN_ACCESS_ASSERT)
		return false;

	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
	    (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
	    !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
		return true; /* Assume aligned writes up to word size are atomic. */

	if (ctx->atomic_next > 0) {
		/*
		 * Because we do not have separate contexts for nested
		 * interrupts, in case atomic_next is set, we simply assume that
		 * the outer interrupt set atomic_next. In the worst case, we
		 * will conservatively consider operations as atomic. This is a
		 * reasonable trade-off to make, since this case should be
		 * extremely rare; however, even if extremely rare, it could
		 * lead to false positives otherwise.
		 */
		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
			--ctx->atomic_next; /* in task, or outer interrupt */
		return true;
	}

	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}

static __always_inline bool
should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
	/*
	 * Never set up watchpoints when memory operations are atomic.
	 *
	 * Need to check this first, before kcsan_skip check below: (1) atomics
	 * should not count towards skipped instructions, and (2) to actually
	 * decrement kcsan_atomic_next for consecutive instruction stream.
	 */
	if (is_atomic(ptr, size, type, ctx))
		return false;

	if (this_cpu_dec_return(kcsan_skip) >= 0)
		return false;

	/*
	 * NOTE: If we get here, kcsan_skip must always be reset in slow path
	 * via reset_kcsan_skip() to avoid underflow.
	 */

	/* this operation should be watched */
	return true;
}
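
/*
 * Note on overhead: with the skip counter reset to e.g. the default of 4000
 * (see reset_kcsan_skip() below), only roughly one in 4000 non-atomic accesses
 * per CPU reaches kcsan_setup_watchpoint(), which bounds the cost that
 * watchpoint setup adds to the fast-path.
 */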

/*
 * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
 * congruential generator, using constants from "Numerical Recipes".
 */
static u32 kcsan_prandom_u32_max(u32 ep_ro)
{
	u32 state = this_cpu_read(kcsan_rand_state);

	state = 1664525 * state + 1013904223;
	this_cpu_write(kcsan_rand_state, state);

	return state % ep_ro;
}
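
/*
 * For example, starting from state == 1, the next state is
 * 1664525 * 1 + 1013904223 == 1015568748 (arithmetic is mod 2^32, since state
 * is u32), and the caller receives 1015568748 % ep_ro. The plain modulo is
 * slightly biased towards smaller values, which is acceptable here: the
 * result only perturbs delays and skip counts.
 */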

static inline void reset_kcsan_skip(void)
{
	long skip_count = kcsan_skip_watch -
			  (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
				   kcsan_prandom_u32_max(kcsan_skip_watch) :
				   0);
	this_cpu_write(kcsan_skip, skip_count);
}
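
/*
 * For example, with the default CONFIG_KCSAN_SKIP_WATCH == 4000 and
 * CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE=y, kcsan_prandom_u32_max(4000) returns a
 * value in [0, 4000), so skip_count lies in (0, 4000].
 */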

static __always_inline bool kcsan_is_enabled(void)
{
	return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
}

/* Introduce delay depending on context and configuration. */
static void delay_access(int type)
{
	unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
	/* For certain access types, skew the random delay to be longer. */
	unsigned int skew_delay_order =
		(type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;

	delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
			       kcsan_prandom_u32_max(delay >> skew_delay_order) :
			       0;
	udelay(delay);
}
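
/*
 * For example, with the default kcsan_udelay_task == 80 and
 * CONFIG_KCSAN_DELAY_RANDOMIZE=y, a plain access in task context delays for
 * (0, 80] microseconds; a compound or ASSERT access only subtracts up to
 * delay/2 (skew_delay_order == 1) and thus delays for (40, 80] microseconds.
 */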

void kcsan_save_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->kcsan_save_irqtrace = task->irqtrace;
#endif
}

void kcsan_restore_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->irqtrace = task->kcsan_save_irqtrace;
#endif
}

/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions.
 *
 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
 * be filtered from the stacktrace, as well as give them unique names for the
 * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
 * since they do not access any user memory themselves, but may run within
 * UACCESS regions where instrumentation is still emitted.
 */

static noinline void kcsan_found_watchpoint(const volatile void *ptr,
					    size_t size,
					    int type,
					    atomic_long_t *watchpoint,
					    long encoded_watchpoint)
{
	unsigned long flags;
	bool consumed;

	if (!kcsan_is_enabled())
		return;

	/*
	 * The access_mask check relies on value-change comparison. To avoid
	 * reporting a race where e.g. the writer set up the watchpoint, but the
	 * reader has access_mask!=0, we have to ignore the found watchpoint.
	 */
	if (get_ctx()->access_mask != 0)
		return;

	/*
	 * Consume the watchpoint as soon as possible, to minimize the chances
	 * of !consumed. Consuming the watchpoint must always be guarded by
	 * kcsan_is_enabled() check, as otherwise we might erroneously
	 * trigger reports when disabled.
	 */
	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

	/* keep this after try_consume_watchpoint */
	flags = user_access_save();

	if (consumed) {
		kcsan_save_irqtrace(current);
		kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
			     KCSAN_REPORT_CONSUMED_WATCHPOINT,
			     watchpoint - watchpoints);
		kcsan_restore_irqtrace(current);
	} else {
		/*
		 * The other thread may not print any diagnostics, as it has
		 * already removed the watchpoint, or another thread consumed
		 * the watchpoint before this thread.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
	}

	if ((type & KCSAN_ACCESS_ASSERT) != 0)
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
	else
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);

	user_access_restore(flags);
}

static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	atomic_long_t *watchpoint;
	union {
		u8 _1;
		u16 _2;
		u32 _4;
		u64 _8;
	} expect_value;
	unsigned long access_mask;
	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
	unsigned long ua_flags = user_access_save();
	unsigned long irq_flags = 0;

	/*
	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
	 * should_watch().
	 */
	reset_kcsan_skip();

	if (!kcsan_is_enabled())
		goto out;

	/*
	 * Special atomic rules: unlikely to be true, so we check them here in
	 * the slow-path, and not in the fast-path in is_atomic(). Call after
	 * kcsan_is_enabled(), as we may access memory that is not yet
	 * initialized during early boot.
	 */
	if (!is_assert && kcsan_is_atomic_special(ptr))
		goto out;

	if (!check_encodable((unsigned long)ptr, size)) {
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
		goto out;
	}

	/*
	 * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
	 * runtime is entered for every memory access, and potentially useful
	 * information is lost if dirtied by KCSAN.
	 */
	kcsan_save_irqtrace(current);
	if (!kcsan_interrupt_watcher)
		local_irq_save(irq_flags);

	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
	if (watchpoint == NULL) {
		/*
		 * Out of capacity: the size of 'watchpoints', and the frequency
		 * with which should_watch() returns true should be tweaked so
		 * that this case happens very rarely.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
		goto out_unlock;
	}

	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

	/*
	 * Read the current value, to later check and infer a race if the data
	 * was modified via a non-instrumented access, e.g. from a device.
	 */
	expect_value._8 = 0;
	switch (size) {
	case 1:
		expect_value._1 = READ_ONCE(*(const u8 *)ptr);
		break;
	case 2:
		expect_value._2 = READ_ONCE(*(const u16 *)ptr);
		break;
	case 4:
		expect_value._4 = READ_ONCE(*(const u32 *)ptr);
		break;
	case 8:
		expect_value._8 = READ_ONCE(*(const u64 *)ptr);
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
		kcsan_disable_current();
		pr_err("watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
		       is_write ? "write" : "read", size, ptr,
		       watchpoint_slot((unsigned long)ptr),
		       encode_watchpoint((unsigned long)ptr, size, is_write));
		kcsan_enable_current();
	}

	/*
	 * Delay this thread, to increase probability of observing a racy
	 * conflicting access.
	 */
	delay_access(type);

	/*
	 * Re-read value, and check if it is as expected; if not, we infer a
	 * racy access.
	 */
	access_mask = get_ctx()->access_mask;
	switch (size) {
	case 1:
		expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
		if (access_mask)
			expect_value._1 &= (u8)access_mask;
		break;
	case 2:
		expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
		if (access_mask)
			expect_value._2 &= (u16)access_mask;
		break;
	case 4:
		expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
		if (access_mask)
			expect_value._4 &= (u32)access_mask;
		break;
	case 8:
		expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
		if (access_mask)
			expect_value._8 &= (u64)access_mask;
		break;
	default:
		break; /* ignore; we do not diff the values */
	}
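
	/*
	 * expect_value now holds old_value XOR new_value (masked by
	 * access_mask if set): any bit that differs between the two reads is
	 * set. Since the union was zero-initialized and both reads used the
	 * same sized member, comparing ._8 against 0 below is correct for all
	 * access sizes.
	 */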

	/* Were we able to observe a value-change? */
	if (expect_value._8 != 0)
		value_change = KCSAN_VALUE_CHANGE_TRUE;

	/* Check if this access raced with another. */
	if (!consume_watchpoint(watchpoint)) {
		/*
		 * Depending on the access type, map a value_change of MAYBE to
		 * TRUE (always report) or FALSE (never report).
		 */
		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
			if (access_mask != 0) {
				/*
				 * For access with access_mask, we require a
				 * value-change, as it is likely that races on
				 * ~access_mask bits are expected.
				 */
				value_change = KCSAN_VALUE_CHANGE_FALSE;
			} else if (size > 8 || is_assert) {
				/* Always assume a value-change. */
				value_change = KCSAN_VALUE_CHANGE_TRUE;
			}
		}

		/*
		 * No need to increment 'data_races' counter, as the racing
		 * thread already did.
		 *
		 * Count 'assert_failures' for each failed ASSERT access,
		 * therefore both this thread and the racing thread may
		 * increment this counter.
		 */
		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
			     watchpoint - watchpoints);
	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
		/* Inferring a race, since the value should not have changed. */

		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
		if (is_assert)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
			kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
				     watchpoint - watchpoints);
	}

	/*
	 * Remove watchpoint; must be after reporting, since the slot may be
	 * reused after this point.
	 */
	remove_watchpoint(watchpoint);
	atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
out_unlock:
	if (!kcsan_interrupt_watcher)
		local_irq_restore(irq_flags);
	kcsan_restore_irqtrace(current);
out:
	user_access_restore(ua_flags);
}

static __always_inline void check_access(const volatile void *ptr, size_t size,
					 int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	atomic_long_t *watchpoint;
	long encoded_watchpoint;

	/*
	 * Do nothing for zero-sized checks; this comparison will be optimized
	 * out for constant sized instrumentation (__tsan_{read,write}N).
	 */
	if (unlikely(size == 0))
		return;

	/*
	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
	 * user_access_save, as the address that ptr points to is only used to
	 * check if a watchpoint exists; ptr is never dereferenced.
	 */
	watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
				     &encoded_watchpoint);
	/*
	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
	 * slow-path, as long as no state changes that cause a race to be
	 * detected and reported have occurred until kcsan_is_enabled() is
	 * checked.
	 */

	if (unlikely(watchpoint != NULL))
		kcsan_found_watchpoint(ptr, size, type, watchpoint,
				       encoded_watchpoint);
	else {
		struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */

		if (unlikely(should_watch(ptr, size, type, ctx)))
			kcsan_setup_watchpoint(ptr, size, type);
		else if (unlikely(ctx->scoped_accesses.prev))
			kcsan_check_scoped_accesses();
	}
}

/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
	int cpu;

	BUG_ON(!in_task());

	for_each_possible_cpu(cpu)
		per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();

	/*
	 * We are in the init task, and no other tasks should be running;
	 * WRITE_ONCE without memory barrier is sufficient.
	 */
	if (kcsan_early_enable) {
		pr_info("enabled early\n");
		WRITE_ONCE(kcsan_enabled, true);
	}
}

/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
	++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
	if (get_ctx()->disable_count-- == 0) {
		/*
		 * Warn if kcsan_enable_current() calls are unbalanced with
		 * kcsan_disable_current() calls, which causes disable_count to
		 * become negative and should not happen.
		 */
		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_enable_current_nowarn(void)
{
	if (get_ctx()->disable_count-- == 0)
		kcsan_disable_current();
}
EXPORT_SYMBOL(kcsan_enable_current_nowarn);

void kcsan_nestable_atomic_begin(void)
{
	/*
	 * Do *not* check and warn if we are in a flat atomic region: nestable
	 * and flat atomic regions are independent from each other.
	 * See include/linux/kcsan.h: struct kcsan_ctx comments for details.
	 */

	++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
	if (get_ctx()->atomic_nest_count-- == 0) {
		/*
		 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
		 * kcsan_nestable_atomic_begin() calls, which causes
		 * atomic_nest_count to become negative and should not happen.
		 */
		kcsan_nestable_atomic_begin(); /* restore to 0 */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
	get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
	get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

void kcsan_atomic_next(int n)
{
	get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);

void kcsan_set_access_mask(unsigned long mask)
{
	get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);

struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	__kcsan_check_access(ptr, size, type);

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	INIT_LIST_HEAD(&sa->list);
	sa->ptr = ptr;
	sa->size = size;
	sa->type = type;

	if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
		INIT_LIST_HEAD(&ctx->scoped_accesses);
	list_add(&sa->list, &ctx->scoped_accesses);

	ctx->disable_count--;
	return sa;
}
EXPORT_SYMBOL(kcsan_begin_scoped_access);

void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
		return;

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	list_del(&sa->list);
	if (list_empty(&ctx->scoped_accesses))
		/*
		 * Ensure we do not enter kcsan_check_scoped_accesses()
		 * slow-path if unnecessary, and avoid requiring list_empty()
		 * in the fast-path (to avoid a READ_ONCE() and potential
		 * uaccess warning).
		 */
		ctx->scoped_accesses.prev = NULL;

	ctx->disable_count--;

	__kcsan_check_access(sa->ptr, sa->size, sa->type);
}
EXPORT_SYMBOL(kcsan_end_scoped_access);

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
	check_access(ptr, size, type);
}
EXPORT_SYMBOL(__kcsan_check_access);

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * version to the generic version, which can handle both.
 */
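
/*
 * Illustration (assuming an 8-byte type and a compiler that emits the generic
 * calls): for a plain assignment such as
 *
 *	x = y;
 *
 * the instrumented code is roughly equivalent to
 *
 *	__tsan_read8(&y);
 *	__tsan_write8(&x);
 *	x = y;
 *
 * with the exact calls depending on compiler version and what it knows about
 * alignment.
 */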

#define DEFINE_TSAN_READ_WRITE(size)                                           \
	void __tsan_read##size(void *ptr);                                     \
	void __tsan_read##size(void *ptr)                                      \
	{                                                                      \
		check_access(ptr, size, 0);                                    \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_read##size);                                      \
	void __tsan_unaligned_read##size(void *ptr)                            \
		__alias(__tsan_read##size);                                    \
	EXPORT_SYMBOL(__tsan_unaligned_read##size);                            \
	void __tsan_write##size(void *ptr);                                    \
	void __tsan_write##size(void *ptr)                                     \
	{                                                                      \
		check_access(ptr, size, KCSAN_ACCESS_WRITE);                   \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_write##size);                                     \
	void __tsan_unaligned_write##size(void *ptr)                           \
		__alias(__tsan_write##size);                                   \
	EXPORT_SYMBOL(__tsan_unaligned_write##size);                           \
	void __tsan_read_write##size(void *ptr);                               \
	void __tsan_read_write##size(void *ptr)                                \
	{                                                                      \
		check_access(ptr, size,                                        \
			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE);      \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_read_write##size);                                \
	void __tsan_unaligned_read_write##size(void *ptr)                      \
		__alias(__tsan_read_write##size);                              \
	EXPORT_SYMBOL(__tsan_unaligned_read_write##size)

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);

void __tsan_read_range(void *ptr, size_t size);
void __tsan_read_range(void *ptr, size_t size)
{
	check_access(ptr, size, 0);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size);
void __tsan_write_range(void *ptr, size_t size)
{
	check_access(ptr, size, KCSAN_ACCESS_WRITE);
}
EXPORT_SYMBOL(__tsan_write_range);

/*
 * Use of explicit volatile is generally disallowed [1]; however, volatile is
 * still used in various concurrent contexts, whether in low-level
 * synchronization primitives or for legacy reasons.
 * [1] https://lwn.net/Articles/233479/
 *
 * We only consider volatile accesses atomic if they are aligned and would pass
 * the size-check of compiletime_assert_rwonce_type().
 */
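
/*
 * For example, READ_ONCE() compiles to a plain volatile load; with compilers
 * that distinguish volatile accesses, a naturally aligned 4-byte READ_ONCE()
 * therefore reaches __tsan_volatile_read4() below and is treated as atomic,
 * whereas a 16-byte volatile access fails the size check and is checked like
 * a plain access.
 */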
#define DEFINE_TSAN_VOLATILE_READ_WRITE(size)                                  \
	void __tsan_volatile_read##size(void *ptr);                            \
	void __tsan_volatile_read##size(void *ptr)                             \
	{                                                                      \
		const bool is_atomic = size <= sizeof(long long) &&            \
				       IS_ALIGNED((unsigned long)ptr, size);   \
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)      \
			return;                                                \
		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0);  \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_volatile_read##size);                             \
	void __tsan_unaligned_volatile_read##size(void *ptr)                   \
		__alias(__tsan_volatile_read##size);                           \
	EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size);                   \
	void __tsan_volatile_write##size(void *ptr);                           \
	void __tsan_volatile_write##size(void *ptr)                            \
	{                                                                      \
		const bool is_atomic = size <= sizeof(long long) &&            \
				       IS_ALIGNED((unsigned long)ptr, size);   \
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)      \
			return;                                                \
		check_access(ptr, size,                                        \
			     KCSAN_ACCESS_WRITE |                              \
				     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0));   \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_volatile_write##size);                            \
	void __tsan_unaligned_volatile_write##size(void *ptr)                  \
		__alias(__tsan_volatile_write##size);                          \
	EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)

DEFINE_TSAN_VOLATILE_READ_WRITE(1);
DEFINE_TSAN_VOLATILE_READ_WRITE(2);
DEFINE_TSAN_VOLATILE_READ_WRITE(4);
DEFINE_TSAN_VOLATILE_READ_WRITE(8);
DEFINE_TSAN_VOLATILE_READ_WRITE(16);

/*
 * The below are not required by KCSAN, but can still be emitted by the
 * compiler.
 */
void __tsan_func_entry(void *call_pc);
void __tsan_func_entry(void *call_pc)
{
}
EXPORT_SYMBOL(__tsan_func_entry);
void __tsan_func_exit(void);
void __tsan_func_exit(void)
{
}
EXPORT_SYMBOL(__tsan_func_exit);
void __tsan_init(void);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);

/*
 * Instrumentation for atomic builtins (__atomic_*, __sync_*).
 *
 * Normal kernel code _should not_ be using them directly, but some
 * architectures may implement some or all atomics using the compilers'
 * builtins.
 *
 * Note: If an architecture decides to fully implement atomics using the
 * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
 * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
 * atomic-instrumented) is no longer necessary.
 *
 * TSAN instrumentation replaces atomic accesses with calls to any of the below
 * functions, whose job is to also execute the operation itself.
 */
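
/*
 * For example, on an architecture whose atomics use the builtins, a call such
 * as
 *
 *	__atomic_fetch_add(&counter, 1, __ATOMIC_RELAXED);
 *
 * on a u32 counter is emitted by the compiler as
 * __tsan_atomic32_fetch_add(&counter, 1, __ATOMIC_RELAXED), which below both
 * checks the access as an atomic compound read-write and performs the actual
 * addition via __atomic_fetch_add().
 */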

#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits)                                    \
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder);  \
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder)   \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \
		}                                                              \
		return __atomic_load_n(ptr, memorder);                         \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_load);                             \
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE,                \
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
		}                                                              \
		__atomic_store_n(ptr, v, memorder);                            \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_store)

#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix)                               \
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE,                \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC);             \
		}                                                              \
		return __atomic_##op##suffix(ptr, v, memorder);                \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_##op)

/*
 * Note: CAS operations are always classified as write, even in case they
 * fail. We cannot perform check_access() after a write, as it might lead to
 * false positives, in cases such as:
 *
 *	T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
 *
 *	T1: if (__atomic_load_n(&p->flag, ...)) {
 *		modify *p;
 *		p->flag = 0;
 *	}
 *
 * The only downside is that, if there are 3 threads, with one CAS that
 * succeeds, another CAS that fails, and an unmarked racing operation, we may
 * point at the wrong CAS as the source of the race. However, if we assume that
 * all CAS can succeed in some other execution, the data race is still valid.
 */
#define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak)                       \
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
							      u##bits val, int mo, int fail_mo); \
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
							      u##bits val, int mo, int fail_mo) \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE,                \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC);             \
		}                                                              \
		return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)

#define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)                                   \
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo); \
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo) \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE,                \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC);             \
		}                                                              \
		__atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo);   \
		return exp;                                                    \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)

#define DEFINE_TSAN_ATOMIC_OPS(bits)                                           \
	DEFINE_TSAN_ATOMIC_LOAD_STORE(bits);                                   \
	DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n);                            \
	DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, );                             \
	DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, );                             \
	DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, );                             \
	DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, );                              \
	DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, );                             \
	DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, );                            \
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0);                           \
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1);                             \
	DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)

DEFINE_TSAN_ATOMIC_OPS(8);
DEFINE_TSAN_ATOMIC_OPS(16);
DEFINE_TSAN_ATOMIC_OPS(32);
DEFINE_TSAN_ATOMIC_OPS(64);

void __tsan_atomic_thread_fence(int memorder);
void __tsan_atomic_thread_fence(int memorder)
{
	__atomic_thread_fence(memorder);
}
EXPORT_SYMBOL(__tsan_atomic_thread_fence);

void __tsan_atomic_signal_fence(int memorder);
void __tsan_atomic_signal_fence(int memorder) { }
EXPORT_SYMBOL(__tsan_atomic_signal_fence);