/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_TYPES_H
#define __LINUX_COMPILER_TYPES_H

/*
 * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
 * In the meantime, to support gcc < 10, we implement __has_builtin
 * by hand.
 */
#ifndef __has_builtin
#define __has_builtin(x) (0)
#endif

#ifndef __ASSEMBLY__

/*
 * Skipped when running bindgen due to a libclang issue;
 * see https://github.com/rust-lang/rust-bindgen/issues/2244.
 */
#if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \
	__has_attribute(btf_type_tag) && !defined(__BINDGEN__)
# define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value)))
#else
# define BTF_TYPE_TAG(value) /* nothing */
#endif

/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
/* address spaces */
# define __kernel __attribute__((address_space(0)))
# define __user __attribute__((noderef, address_space(__user)))
# define __iomem __attribute__((noderef, address_space(__iomem)))
# define __percpu __attribute__((noderef, address_space(__percpu)))
# define __rcu __attribute__((noderef, address_space(__rcu)))
static inline void __chk_user_ptr(const volatile void __user *ptr) { }
static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
/* context/locking */
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
# define __cond_acquires(x) __attribute__((context(x,0,-1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
/* other */
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
# define __safe __attribute__((safe))
# define __private __attribute__((noderef))
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
/* address spaces */
# define __kernel
# ifdef STRUCTLEAK_PLUGIN
# define __user __attribute__((user))
# else
# define __user BTF_TYPE_TAG(user)
# endif
# define __iomem
# define __percpu BTF_TYPE_TAG(percpu)
# define __rcu BTF_TYPE_TAG(rcu)

# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
/* context/locking */
# define __must_hold(x)
# define __acquires(x)
# define __cond_acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
/* other */
# define __force
# define __nocast
# define __safe
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
# define __builtin_warning(x, y...) (1)
#endif /* __CHECKER__ */
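
/*
 * Illustrative sketch (hypothetical struct and functions, not part of this
 * header): how the sparse annotations above are typically used. A lock
 * helper that returns with the lock held is marked __acquires(), its
 * counterpart __releases(), and a function that expects the lock to already
 * be held __must_hold(), so sparse can check the context balance. Pointer
 * annotations such as __user mark address spaces that must not be
 * dereferenced directly, and __force documents an intentional cast across
 * them:
 *
 *	static void my_dev_lock(struct my_dev *dev) __acquires(&dev->lock)
 *	{
 *		spin_lock(&dev->lock);
 *	}
 *
 *	static void my_dev_unlock(struct my_dev *dev) __releases(&dev->lock)
 *	{
 *		spin_unlock(&dev->lock);
 *	}
 *
 *	static u32 my_dev_peek(struct my_dev *dev) __must_hold(&dev->lock)
 *	{
 *		return dev->status;
 *	}
 *
 *	int my_dev_read(struct my_dev *dev, char __user *buf, size_t len);
 */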

/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)
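
/*
 * Example (illustrative, hypothetical macro name): the extra level of
 * indirection lets arguments such as __LINE__ or __COUNTER__ expand before
 * being pasted:
 *
 *	#define MY_UNIQUE_ID(prefix) __PASTE(prefix, __COUNTER__)
 *	int MY_UNIQUE_ID(tmp_);		// e.g. expands to "int tmp_0;"
 *
 * Calling ___PASTE() directly would paste the unexpanded token __COUNTER__
 * instead.
 */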

#ifdef __KERNEL__

/* Attributes */
#include <linux/compiler_attributes.h>

#if CONFIG_FUNCTION_ALIGNMENT > 0
#define __function_aligned __aligned(CONFIG_FUNCTION_ALIGNMENT)
#else
#define __function_aligned
#endif

/*
 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
 *
 * When -falign-functions=N is in use, we must avoid the cold attribute as
 * GCC drops the alignment for cold functions. Worse, GCC can implicitly mark
 * callees of cold functions as cold themselves, so it's not sufficient to add
 * __function_aligned here as that will not ensure that callees are correctly
 * aligned.
 *
 * See:
 *
 *   https://lore.kernel.org/lkml/Y77%2FqVgvaJidFpYt@FVFF77S0Q05N
 *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c9
 */
#if defined(CONFIG_CC_HAS_SANE_FUNCTION_ALIGNMENT) || (CONFIG_FUNCTION_ALIGNMENT == 0)
#define __cold __attribute__((__cold__))
#else
#define __cold
#endif

/*
 * On x86-64 and arm64 targets, __preserve_most changes the calling convention
 * of a function to make the code in the caller as unintrusive as possible. This
 * convention behaves identically to the C calling convention on how arguments
 * and return values are passed, but uses a different set of caller- and callee-
 * saved registers.
 *
 * The purpose is to alleviate the burden of saving and recovering a large
 * register set before and after the call in the caller. This is beneficial for
 * rarely taken slow paths, such as error-reporting functions that may be called
 * from hot paths.
 *
 * Note: This may conflict with instrumentation inserted on function entry which
 * does not use __preserve_most or equivalent convention (if in assembly). Since
 * function tracing assumes the normal C calling convention, where the attribute
 * is supported, __preserve_most implies notrace. It is recommended to restrict
 * use of the attribute to functions that should or already disable tracing.
 *
 * Optional: not supported by gcc.
 *
 * clang: https://clang.llvm.org/docs/AttributeReference.html#preserve-most
 */
#if __has_attribute(__preserve_most__) && (defined(CONFIG_X86_64) || defined(CONFIG_ARM64))
# define __preserve_most notrace __attribute__((__preserve_most__))
#else
# define __preserve_most
#endif
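
/*
 * Illustrative sketch (hypothetical function, not defined here): an
 * error-reporting slow path called from many hot paths can be marked
 * __preserve_most so its callers do not have to spill caller-saved registers
 * around the call:
 *
 *	void __preserve_most my_report_error(const char *what);
 *
 *	static inline void my_check(int cond)
 *	{
 *		if (unlikely(cond))
 *			my_report_error("bad state");
 *	}
 */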

/*
 * Annotating a function/variable with __retain tells the compiler to place
 * the object in its own section and set the flag SHF_GNU_RETAIN. This flag
 * instructs the linker to retain the object during garbage-cleanup or LTO
 * phases.
 *
 * Note that the __used macro is also used to prevent functions or data
 * being optimized out, but operates at the compiler/IR-level and may still
 * allow unintended removal of objects during linking.
 *
 * Optional: only supported since gcc >= 11, clang >= 13
 *
 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-retain-function-attribute
 * clang: https://clang.llvm.org/docs/AttributeReference.html#retain
 */
#if __has_attribute(__retain__) && \
	(defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || \
	 defined(CONFIG_LTO_CLANG))
# define __retain __attribute__((__retain__))
#else
# define __retain
#endif
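
/*
 * Illustrative sketch (hypothetical symbol and section name): data referenced
 * only from assembly or from a tool parsing the binary can be kept through
 * --gc-sections/LTO by combining __used (compiler level) with __retain
 * (linker level):
 *
 *	static const char my_tag[] __used __retain
 *		__section(".my_tags") = "example";
 */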

/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
#elif defined(__GNUC__)
/* The above compilers also define __GNUC__, so order is important here. */
#include <linux/compiler-gcc.h>
#else
#error "Unknown compiler"
#endif

/*
 * Some architectures need to provide custom definitions of macros provided
 * by linux/compiler-*.h, and can do so using asm/compiler.h. We include that
 * conditionally rather than using an asm-generic wrapper in order to avoid
 * build failures if any C compilation, which will include this file via an
 * -include argument in c_flags, occurs prior to the asm-generic wrappers being
 * generated.
 */
#ifdef CONFIG_HAVE_ARCH_COMPILER_H
#include <asm/compiler.h>
#endif

struct ftrace_branch_data {
	const char *func;
	const char *file;
	unsigned line;
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];
	};
};

struct ftrace_likely_data {
	struct ftrace_branch_data data;
	unsigned long constant;
};

#if defined(CC_USING_HOTPATCH)
#define notrace __attribute__((hotpatch(0, 0)))
#elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY)
#define notrace __attribute__((patchable_function_entry(0, 0)))
#else
#define notrace __attribute__((__no_instrument_function__))
#endif

/*
 * On ARM (currently the only user of __naked) it doesn't make sense to trace
 * naked functions: mcount would be called without the stack and frame pointer
 * being set up, and there would be no chance to restore the lr register to
 * its value from before mcount was called.
 */
#define __naked __attribute__((__naked__)) notrace

/*
 * Prefer gnu_inline, so that extern inline functions do not emit an
 * externally visible function. This makes extern inline behave as per gnu89
 * semantics rather than c99. This prevents multiple symbol definition errors
 * of extern inline functions at link time.
 * A lot of inline functions can cause havoc with function tracing.
 */
#define inline inline __gnu_inline __inline_maybe_unused notrace

/*
 * gcc provides both __inline__ and __inline as alternate spellings of
 * the inline keyword, though the latter is undocumented. New kernel
 * code should only use the inline spelling, but some existing code
 * uses __inline__. Since we #define inline above, to ensure
 * __inline__ has the same semantics, we need this #define.
 *
 * However, the spelling __inline is strictly reserved for referring
 * to the bare keyword.
 */
#define __inline__ inline

/*
 * GCC does not warn about unused static inline functions for
 * -Wunused-function, but clang does. Suppress the warning in clang as well by
 * using __maybe_unused, but keep the warning enabled for W=1 builds so that
 * clang can still find unused functions there. Remove __inline_maybe_unused
 * entirely after fixing most of the -Wunused-function warnings.
 */
#ifdef KBUILD_EXTRA_WARN1
#define __inline_maybe_unused
#else
#define __inline_maybe_unused __maybe_unused
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead, for documentation reasons.
 */
#define noinline_for_stack noinline

/*
 * Sanitizer helper attributes: Because using __always_inline and
 * __no_sanitize_* conflict, provide helper attributes that will either expand
 * to __no_sanitize_* in compilation units where instrumentation is enabled
 * (__SANITIZE_*__), or __always_inline in compilation units without
 * instrumentation (__SANITIZE_*__ undefined).
 */
#ifdef __SANITIZE_ADDRESS__
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining, and attempting to inline it may cause a build
 * failure: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
# define __no_sanitize_or_inline __no_kasan_or_inline
#else
# define __no_kasan_or_inline __always_inline
#endif

#ifdef __SANITIZE_THREAD__
/*
 * Clang still emits instrumentation for __tsan_func_{entry,exit}() and builtin
 * atomics even with __no_sanitize_thread (to avoid false positives in userspace
 * ThreadSanitizer). The kernel's requirements are stricter and we really do not
 * want any instrumentation with __no_kcsan.
 *
 * Therefore we add __disable_sanitizer_instrumentation where available to
 * disable all instrumentation. See Kconfig.kcsan where this is mandatory.
 */
# define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation
/*
 * Type qualifier to mark variables where all data-racy accesses should be
 * ignored by KCSAN. Note, the implementation simply marks these variables as
 * volatile, since KCSAN will treat such accesses as "marked".
 */
# define __data_racy volatile
# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
#else
# define __no_kcsan
# define __data_racy
#endif

#ifdef __SANITIZE_MEMORY__
/*
 * Similarly to KASAN and KCSAN, KMSAN loses function attributes of inlined
 * functions, therefore disabling KMSAN checks also requires disabling inlining.
 *
 * __no_sanitize_or_inline effectively prevents KMSAN from reporting errors
 * within the function and marks all its outputs as initialized.
 */
# define __no_sanitize_or_inline __no_kmsan_checks notrace __maybe_unused
#endif

#ifndef __no_sanitize_or_inline
#define __no_sanitize_or_inline __always_inline
#endif
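
/*
 * Illustrative sketch (hypothetical helper): a raw read that may legitimately
 * touch memory the sanitizers would complain about can use the helper
 * attribute; it expands to the matching __no_* attributes (plus notrace)
 * under KASAN/KCSAN/KMSAN builds and to a plain __always_inline otherwise:
 *
 *	static __no_sanitize_or_inline
 *	unsigned long my_read_once(const unsigned long *p)
 *	{
 *		return *(const volatile unsigned long *)p;
 *	}
 */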

/*
 * Apply __counted_by() when the endianness matches to increase test coverage.
 */
#ifdef __LITTLE_ENDIAN
#define __counted_by_le(member) __counted_by(member)
#define __counted_by_be(member)
#else
#define __counted_by_le(member)
#define __counted_by_be(member) __counted_by(member)
#endif
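
/*
 * Illustrative sketch (hypothetical struct): for a flexible array whose length
 * field carries an explicit little-endian on-wire value, only little-endian
 * builds get the bounds annotation:
 *
 *	struct my_msg {
 *		__le16 len;
 *		u8 data[] __counted_by_le(len);
 *	};
 */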

/* Do not trap wrapping arithmetic within an annotated function. */
#ifdef CONFIG_UBSAN_SIGNED_WRAP
# define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow")))
#else
# define __signed_wrap
#endif

/* Section for code which can't be instrumented at all */
#define __noinstr_section(section) \
	noinline notrace __attribute((__section__(section))) \
	__no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \
	__no_sanitize_memory __signed_wrap

#define noinstr __noinstr_section(".noinstr.text")
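
/*
 * Illustrative sketch (hypothetical function): low-level entry code that runs
 * before the kernel is ready for tracing or sanitizers is placed into
 * .noinstr.text via the noinstr marker:
 *
 *	noinstr void my_early_entry(struct pt_regs *regs)
 *	{
 *		// no tracing, no KASAN/KCSAN/KMSAN, no coverage in here
 *	}
 */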

/*
 * The __cpuidle section is used twofold:
 *
 * 1) the original use -- identifying if a CPU is 'stuck' in idle state based
 *    on its instruction pointer. See cpu_in_idle().
 *
 * 2) suppressing instrumentation around where cpuidle disables RCU; where the
 *    function isn't strictly required for #1, this is interchangeable with
 *    noinstr.
 */
#define __cpuidle __noinstr_section(".cpuidle.text")

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

/*
 * The below symbols may be defined for one or more, but not ALL, of the above
 * compilers. We don't consider that to be an error, so set them to nothing.
 * For example, some of them are for compiler specific plugins.
 */
#ifndef __latent_entropy
# define __latent_entropy
#endif

#if defined(RANDSTRUCT) && !defined(__CHECKER__)
# define __randomize_layout __designated_init __attribute__((randomize_layout))
# define __no_randomize_layout __attribute__((no_randomize_layout))
/* This anon struct can add padding, so only enable it under randstruct. */
# define randomized_struct_fields_start struct {
# define randomized_struct_fields_end } __randomize_layout;
#else
# define __randomize_layout __designated_init
# define __no_randomize_layout
# define randomized_struct_fields_start
# define randomized_struct_fields_end
#endif
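
/*
 * Illustrative sketch (hypothetical struct): wrapping a group of fields in
 * randomized_struct_fields_start/_end lets RANDSTRUCT shuffle only that
 * group; without RANDSTRUCT the markers expand to nothing and no anonymous
 * struct (or its padding) is introduced:
 *
 *	struct my_ctx {
 *		unsigned long id;	// stays first
 *		randomized_struct_fields_start
 *			void *priv;
 *			unsigned int flags;
 *		randomized_struct_fields_end
 *	};
 */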

#ifndef __noscs
# define __noscs
#endif

#ifndef __nocfi
# define __nocfi
#endif

/*
 * Any place that could be marked with the "alloc_size" attribute is also
 * a place to be marked with the "malloc" attribute, except those that may
 * be performing a _reallocation_, as that may alias the existing pointer.
 * For these, use __realloc_size().
 */
#ifdef __alloc_size__
# define __alloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__) __malloc
# define __realloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__)
#else
# define __alloc_size(x, ...) __malloc
# define __realloc_size(x, ...)
#endif
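
/*
 * Illustrative sketch (hypothetical allocators): the arguments name the
 * 1-based parameter positions that hold the allocation size, which lets the
 * compiler track object sizes for __builtin_*_object_size() and fortified
 * helpers; a reallocation uses __realloc_size() and so drops __malloc,
 * because the returned pointer may alias its input:
 *
 *	void *my_alloc(size_t n) __alloc_size(1);
 *	void *my_calloc(size_t n, size_t sz) __alloc_size(1, 2);
 *	void *my_realloc(void *p, size_t n) __realloc_size(2);
 */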

/*
 * When the size of an allocated object is needed, use the best available
 * mechanism to find it. (For cases where sizeof() cannot be used.)
 */
#if __has_builtin(__builtin_dynamic_object_size)
#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
#define __member_size(p) __builtin_dynamic_object_size(p, 1)
#else
#define __struct_size(p) __builtin_object_size(p, 0)
#define __member_size(p) __builtin_object_size(p, 1)
#endif
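
/*
 * Illustrative sketch (hypothetical struct): __struct_size() reports the size
 * of the whole object reachable from the pointer, while __member_size() is
 * bounded to the member actually pointed at, which matters for flexible
 * arrays annotated with __counted_by():
 *
 *	struct my_buf {
 *		size_t count;
 *		u8 data[] __counted_by(count);
 *	};
 *
 *	// With __builtin_dynamic_object_size() available, __member_size(p->data)
 *	// can resolve to p->count bytes at runtime instead of an unknown size.
 */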

/* Determine if an attribute has been applied to a variable. */
#if __has_builtin(__builtin_has_attribute)
#define __annotated(var, attr) __builtin_has_attribute(var, attr)
#else
#define __annotated(var, attr) (false)
#endif

/*
 * Some versions of gcc do not mark 'asm goto' volatile:
 *
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979
 *
 * We do it here by hand, because it doesn't hurt.
 */
#ifndef asm_goto_output
#define asm_goto_output(x...) asm volatile goto(x)
#endif

/*
 * Clang has trouble with constraints with multiple
 * alternative behaviors (mainly "g" and "rm").
 */
#ifndef ASM_INPUT_G
#define ASM_INPUT_G "g"
#define ASM_INPUT_RM "rm"
#endif

#ifdef CONFIG_CC_HAS_ASM_INLINE
#define asm_inline asm __inline
#else
#define asm_inline asm
#endif

/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))

/*
 * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
 * non-scalar types unchanged.
 */
/*
 * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
 * is not type-compatible with 'signed char', and we define a separate case.
 */
#define __scalar_type_to_expr_cases(type) \
	unsigned type: (unsigned type)0, \
	signed type: (signed type)0

#define __unqual_scalar_typeof(x) typeof( \
	_Generic((x), \
		char: (char)0, \
		__scalar_type_to_expr_cases(char), \
		__scalar_type_to_expr_cases(short), \
		__scalar_type_to_expr_cases(int), \
		__scalar_type_to_expr_cases(long), \
		__scalar_type_to_expr_cases(long long), \
		default: (x)))
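
/*
 * Example (illustrative, hypothetical function): stripping qualifiers matters
 * when declaring a local copy of a volatile or otherwise qualified scalar,
 * e.g. in READ_ONCE()-style helpers:
 *
 *	static int my_read(const volatile int *p)
 *	{
 *		__unqual_scalar_typeof(*p) val = *p;	// 'val' has type 'int'
 *		return val;
 *	}
 *
 * Non-scalar types such as structs fall through the 'default:' case unchanged.
 */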

/* Is this type a native word size -- useful for atomic operations */
#define __native_word(t) \
	(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
	 sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix) \
	do { \
		/* \
		 * __noreturn is needed to give the compiler enough \
		 * information to avoid certain possibly-uninitialized \
		 * warnings (regardless of the build failing). \
		 */ \
		__noreturn extern void prefix ## suffix(void) \
			__compiletime_error(msg); \
		if (!(condition)) \
			prefix ## suffix(); \
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg: a message to emit if condition is false
 *
 * In the tradition of POSIX assert(), this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)

#define compiletime_assert_atomic_type(t) \
	compiletime_assert(__native_word(t), \
		"Need native word sized stores/loads for atomicity.")
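
/*
 * Example (illustrative): both checks below are evaluated entirely at compile
 * time and emit no object code; the second form is how atomic primitives
 * verify their operand size. These expand to statements, so they must appear
 * at function scope:
 *
 *	compiletime_assert(sizeof(long) >= sizeof(int),
 *			   "long must be at least as wide as int");
 *	compiletime_assert_atomic_type(unsigned long);
 *
 * When __OPTIMIZE__ is not defined (e.g. -O0), __compiletime_assert() becomes
 * an empty statement and the condition is not checked.
 */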

/* Helpers for emitting diagnostics in pragmas. */
#ifndef __diag
#define __diag(string)
#endif

#ifndef __diag_GCC
#define __diag_GCC(version, severity, string)
#endif

#define __diag_push() __diag(push)
#define __diag_pop() __diag(pop)

#define __diag_ignore(compiler, version, option, comment) \
	__diag_ ## compiler(version, ignore, option)
#define __diag_warn(compiler, version, option, comment) \
	__diag_ ## compiler(version, warn, option)
#define __diag_error(compiler, version, option, comment) \
	__diag_ ## compiler(version, error, option)

#ifndef __diag_ignore_all
#define __diag_ignore_all(option, comment)
#endif
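
/*
 * Example (illustrative): temporarily silencing a GCC diagnostic around a
 * deliberately unusual construct, then restoring the previous state:
 *
 *	__diag_push();
 *	__diag_ignore(GCC, 8, "-Wattribute-alias",
 *		      "Type aliasing is used to sanitize syscall arguments");
 *	...
 *	__diag_pop();
 *
 * For compilers/versions without a matching __diag_<compiler> handler these
 * expand to nothing.
 */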

#endif /* __LINUX_COMPILER_TYPES_H */