xref: /linux/mm/kmsan/instrumentation.c (revision c6fbb759)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * KMSAN compiler API.
4  *
5  * This file implements __msan_XXX hooks that Clang inserts into the code
6  * compiled with -fsanitize=kernel-memory.
7  * See Documentation/dev-tools/kmsan.rst for more information on how KMSAN
8  * instrumentation works.
9  *
10  * Copyright (C) 2017-2022 Google LLC
11  * Author: Alexander Potapenko <glider@google.com>
12  *
13  */
14 
15 #include "kmsan.h"
16 #include <linux/gfp.h>
17 #include <linux/mm.h>
18 #include <linux/uaccess.h>
19 
20 static inline bool is_bad_asm_addr(void *addr, uintptr_t size, bool is_store)
21 {
22 	if ((u64)addr < TASK_SIZE)
23 		return true;
24 	if (!kmsan_get_metadata(addr, KMSAN_META_SHADOW))
25 		return true;
26 	return false;
27 }
28 
29 static inline struct shadow_origin_ptr
30 get_shadow_origin_ptr(void *addr, u64 size, bool store)
31 {
32 	unsigned long ua_flags = user_access_save();
33 	struct shadow_origin_ptr ret;
34 
35 	ret = kmsan_get_shadow_origin_ptr(addr, size, store);
36 	user_access_restore(ua_flags);
37 	return ret;
38 }
39 
40 /* Get shadow and origin pointers for a memory load with non-standard size. */
41 struct shadow_origin_ptr __msan_metadata_ptr_for_load_n(void *addr,
42 							uintptr_t size)
43 {
44 	return get_shadow_origin_ptr(addr, size, /*store*/ false);
45 }
46 EXPORT_SYMBOL(__msan_metadata_ptr_for_load_n);
47 
48 /* Get shadow and origin pointers for a memory store with non-standard size. */
49 struct shadow_origin_ptr __msan_metadata_ptr_for_store_n(void *addr,
50 							 uintptr_t size)
51 {
52 	return get_shadow_origin_ptr(addr, size, /*store*/ true);
53 }
54 EXPORT_SYMBOL(__msan_metadata_ptr_for_store_n);
55 
/*
 * Declare functions that obtain shadow/origin pointers for loads and stores
 * with fixed size.
 *
 * For each instantiated size the macro emits an exported pair:
 *   __msan_metadata_ptr_for_load_<size>()  - metadata for a fixed-size load
 *   __msan_metadata_ptr_for_store_<size>() - metadata for a fixed-size store
 * Both simply forward to get_shadow_origin_ptr() with the store flag set
 * accordingly.
 */
#define DECLARE_METADATA_PTR_GETTER(size)                                  \
	struct shadow_origin_ptr __msan_metadata_ptr_for_load_##size(      \
		void *addr)                                                \
	{                                                                  \
		return get_shadow_origin_ptr(addr, size, /*store*/ false); \
	}                                                                  \
	EXPORT_SYMBOL(__msan_metadata_ptr_for_load_##size);                \
	struct shadow_origin_ptr __msan_metadata_ptr_for_store_##size(     \
		void *addr)                                                \
	{                                                                  \
		return get_shadow_origin_ptr(addr, size, /*store*/ true);  \
	}                                                                  \
	EXPORT_SYMBOL(__msan_metadata_ptr_for_store_##size)

/* Instantiate the getters for the power-of-two access sizes. */
DECLARE_METADATA_PTR_GETTER(1);
DECLARE_METADATA_PTR_GETTER(2);
DECLARE_METADATA_PTR_GETTER(4);
DECLARE_METADATA_PTR_GETTER(8);
78 
79 /*
80  * Handle a memory store performed by inline assembly. KMSAN conservatively
81  * attempts to unpoison the outputs of asm() directives to prevent false
82  * positives caused by missed stores.
83  */
84 void __msan_instrument_asm_store(void *addr, uintptr_t size)
85 {
86 	unsigned long ua_flags;
87 
88 	if (!kmsan_enabled || kmsan_in_runtime())
89 		return;
90 
91 	ua_flags = user_access_save();
92 	/*
93 	 * Most of the accesses are below 32 bytes. The two exceptions so far
94 	 * are clwb() (64 bytes) and FPU state (512 bytes).
95 	 * It's unlikely that the assembly will touch more than 512 bytes.
96 	 */
97 	if (size > 512) {
98 		WARN_ONCE(1, "assembly store size too big: %ld\n", size);
99 		size = 8;
100 	}
101 	if (is_bad_asm_addr(addr, size, /*is_store*/ true)) {
102 		user_access_restore(ua_flags);
103 		return;
104 	}
105 	kmsan_enter_runtime();
106 	/* Unpoisoning the memory on best effort. */
107 	kmsan_internal_unpoison_memory(addr, size, /*checked*/ false);
108 	kmsan_leave_runtime();
109 	user_access_restore(ua_flags);
110 }
111 EXPORT_SYMBOL(__msan_instrument_asm_store);
112 
113 /*
114  * KMSAN instrumentation pass replaces LLVM memcpy, memmove and memset
115  * intrinsics with calls to respective __msan_ functions. We use
116  * get_param0_metadata() and set_retval_metadata() to store the shadow/origin
117  * values for the destination argument of these functions and use them for the
118  * functions' return values.
119  */
120 static inline void get_param0_metadata(u64 *shadow,
121 				       depot_stack_handle_t *origin)
122 {
123 	struct kmsan_ctx *ctx = kmsan_get_context();
124 
125 	*shadow = *(u64 *)(ctx->cstate.param_tls);
126 	*origin = ctx->cstate.param_origin_tls[0];
127 }
128 
129 static inline void set_retval_metadata(u64 shadow, depot_stack_handle_t origin)
130 {
131 	struct kmsan_ctx *ctx = kmsan_get_context();
132 
133 	*(u64 *)(ctx->cstate.retval_tls) = shadow;
134 	ctx->cstate.retval_origin_tls = origin;
135 }
136 
137 /* Handle llvm.memmove intrinsic. */
138 void *__msan_memmove(void *dst, const void *src, uintptr_t n)
139 {
140 	depot_stack_handle_t origin;
141 	void *result;
142 	u64 shadow;
143 
144 	get_param0_metadata(&shadow, &origin);
145 	result = __memmove(dst, src, n);
146 	if (!n)
147 		/* Some people call memmove() with zero length. */
148 		return result;
149 	if (!kmsan_enabled || kmsan_in_runtime())
150 		return result;
151 
152 	kmsan_enter_runtime();
153 	kmsan_internal_memmove_metadata(dst, (void *)src, n);
154 	kmsan_leave_runtime();
155 
156 	set_retval_metadata(shadow, origin);
157 	return result;
158 }
159 EXPORT_SYMBOL(__msan_memmove);
160 
161 /* Handle llvm.memcpy intrinsic. */
162 void *__msan_memcpy(void *dst, const void *src, uintptr_t n)
163 {
164 	depot_stack_handle_t origin;
165 	void *result;
166 	u64 shadow;
167 
168 	get_param0_metadata(&shadow, &origin);
169 	result = __memcpy(dst, src, n);
170 	if (!n)
171 		/* Some people call memcpy() with zero length. */
172 		return result;
173 
174 	if (!kmsan_enabled || kmsan_in_runtime())
175 		return result;
176 
177 	kmsan_enter_runtime();
178 	/* Using memmove instead of memcpy doesn't affect correctness. */
179 	kmsan_internal_memmove_metadata(dst, (void *)src, n);
180 	kmsan_leave_runtime();
181 
182 	set_retval_metadata(shadow, origin);
183 	return result;
184 }
185 EXPORT_SYMBOL(__msan_memcpy);
186 
187 /* Handle llvm.memset intrinsic. */
188 void *__msan_memset(void *dst, int c, uintptr_t n)
189 {
190 	depot_stack_handle_t origin;
191 	void *result;
192 	u64 shadow;
193 
194 	get_param0_metadata(&shadow, &origin);
195 	result = __memset(dst, c, n);
196 	if (!kmsan_enabled || kmsan_in_runtime())
197 		return result;
198 
199 	kmsan_enter_runtime();
200 	/*
201 	 * Clang doesn't pass parameter metadata here, so it is impossible to
202 	 * use shadow of @c to set up the shadow for @dst.
203 	 */
204 	kmsan_internal_unpoison_memory(dst, n, /*checked*/ false);
205 	kmsan_leave_runtime();
206 
207 	set_retval_metadata(shadow, origin);
208 	return result;
209 }
210 EXPORT_SYMBOL(__msan_memset);
211 
212 /*
213  * Create a new origin from an old one. This is done when storing an
214  * uninitialized value to memory. When reporting an error, KMSAN unrolls and
215  * prints the whole chain of stores that preceded the use of this value.
216  */
217 depot_stack_handle_t __msan_chain_origin(depot_stack_handle_t origin)
218 {
219 	depot_stack_handle_t ret = 0;
220 	unsigned long ua_flags;
221 
222 	if (!kmsan_enabled || kmsan_in_runtime())
223 		return ret;
224 
225 	ua_flags = user_access_save();
226 
227 	/* Creating new origins may allocate memory. */
228 	kmsan_enter_runtime();
229 	ret = kmsan_internal_chain_origin(origin);
230 	kmsan_leave_runtime();
231 	user_access_restore(ua_flags);
232 	return ret;
233 }
234 EXPORT_SYMBOL(__msan_chain_origin);
235 
236 /* Poison a local variable when entering a function. */
237 void __msan_poison_alloca(void *address, uintptr_t size, char *descr)
238 {
239 	depot_stack_handle_t handle;
240 	unsigned long entries[4];
241 	unsigned long ua_flags;
242 
243 	if (!kmsan_enabled || kmsan_in_runtime())
244 		return;
245 
246 	ua_flags = user_access_save();
247 	entries[0] = KMSAN_ALLOCA_MAGIC_ORIGIN;
248 	entries[1] = (u64)descr;
249 	entries[2] = (u64)__builtin_return_address(0);
250 	/*
251 	 * With frame pointers enabled, it is possible to quickly fetch the
252 	 * second frame of the caller stack without calling the unwinder.
253 	 * Without them, simply do not bother.
254 	 */
255 	if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER))
256 		entries[3] = (u64)__builtin_return_address(1);
257 	else
258 		entries[3] = 0;
259 
260 	/* stack_depot_save() may allocate memory. */
261 	kmsan_enter_runtime();
262 	handle = stack_depot_save(entries, ARRAY_SIZE(entries), GFP_ATOMIC);
263 	kmsan_leave_runtime();
264 
265 	kmsan_internal_set_shadow_origin(address, size, -1, handle,
266 					 /*checked*/ true);
267 	user_access_restore(ua_flags);
268 }
269 EXPORT_SYMBOL(__msan_poison_alloca);
270 
271 /* Unpoison a local variable. */
272 void __msan_unpoison_alloca(void *address, uintptr_t size)
273 {
274 	if (!kmsan_enabled || kmsan_in_runtime())
275 		return;
276 
277 	kmsan_enter_runtime();
278 	kmsan_internal_unpoison_memory(address, size, /*checked*/ true);
279 	kmsan_leave_runtime();
280 }
281 EXPORT_SYMBOL(__msan_unpoison_alloca);
282 
283 /*
284  * Report that an uninitialized value with the given origin was used in a way
285  * that constituted undefined behavior.
286  */
287 void __msan_warning(u32 origin)
288 {
289 	if (!kmsan_enabled || kmsan_in_runtime())
290 		return;
291 	kmsan_enter_runtime();
292 	kmsan_report(origin, /*address*/ 0, /*size*/ 0,
293 		     /*off_first*/ 0, /*off_last*/ 0, /*user_addr*/ 0,
294 		     REASON_ANY);
295 	kmsan_leave_runtime();
296 }
297 EXPORT_SYMBOL(__msan_warning);
298 
299 /*
300  * At the beginning of an instrumented function, obtain the pointer to
301  * `struct kmsan_context_state` holding the metadata for function parameters.
302  */
303 struct kmsan_context_state *__msan_get_context_state(void)
304 {
305 	return &kmsan_get_context()->cstate;
306 }
307 EXPORT_SYMBOL(__msan_get_context_state);
308