1 /*
2  * include/haproxy/compiler.h
3  * This files contains some compiler-specific settings.
4  *
5  * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation, version 2.1
10  * exclusively.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #ifndef _HAPROXY_COMPILER_H
23 #define _HAPROXY_COMPILER_H
24 
25 
/*
 * VAR_ARRAY is the size token to put between the brackets of a variable-size
 * (trailing flexible) array member: gcc before 3.0 needs [0] to declare such
 * an array, while newer compilers accept the empty [] form.
 */
#ifndef VAR_ARRAY
#if defined(__GNUC__) && (__GNUC__ < 3)
#define VAR_ARRAY	0
#else
#define VAR_ARRAY
#endif
#endif
36 
#if !defined(__GNUC__)
/* Some versions of glibc irresponsibly redefine __attribute__() to empty for
 * non-gcc compilers, and as such, silently break all constructors with
 * other compilers. Let's make sure such incompatibilities are detected if any,
 * or that the attribute is properly enforced.
 * The self-referential define below cancels glibc's empty definition: after
 * the #undef, __attribute__(x) expands to itself, so the compiler sees the
 * real attribute again (or reports an error if it genuinely lacks support).
 */
#undef __attribute__
#define __attribute__(x) __attribute__(x)
#endif
46 
/* By default, gcc does not inline large chunks of code, but we want it to
 * respect our choices. forceinline marks a function as always inlined on
 * gcc >= 3; older gcc or non-gcc compilers only get the plain "inline" hint.
 */
#if !defined(forceinline)
#if !defined(__GNUC__) || (__GNUC__ < 3)
#define forceinline inline
#else
#define forceinline inline __attribute__((always_inline))
#endif
#endif
57 
/* silence the "unused" warnings without having to place painful #ifdefs.
 * For use with variables or functions. Relies on __attribute__() being
 * functional, which the block above enforces for non-gcc compilers.
 */
#define __maybe_unused __attribute__((unused))
62 
/* This allows gcc to know that some locations are never reached, for example
 * after a longjmp() in the Lua code, hence that some errors caught by such
 * methods cannot propagate further. This is important with gcc versions 6 and
 * above which can more aggressively detect null dereferences. The builtin
 * below was introduced in gcc 4.5, and before it we didn't care.
 * The explicit defined(__GNUC__) test keeps the check -Wundef-clean on
 * non-gcc compilers; behavior is unchanged since undefined macros evaluate
 * to 0 in #if anyway, yielding the empty fallback as before.
 */
#if defined(__GNUC__) && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
#define my_unreachable() __builtin_unreachable()
#else
#define my_unreachable()
#endif
74 
/* This macro may be used to block constant propagation that lets the compiler
 * detect a possible NULL dereference on a variable resulting from an explicit
 * assignment in an impossible check. Sometimes a function is called which does
 * safety checks and returns NULL if safe conditions are not met. The place
 * where it's called cannot hit this condition and dereferencing the pointer
 * without first checking it will make the compiler emit a warning about a
 * "potential null pointer dereference" which is hard to work around. This
 * macro "washes" the pointer and prevents the compiler from emitting tests
 * branching to undefined instructions. It may only be used when the developer
 * is absolutely certain that the conditions are guaranteed and that the
 * pointer passed in argument cannot be NULL by design.
 */
/* The empty asm pretends to produce <p> ("=rm" output) from itself ("0"
 * input), forcing the compiler to forget everything it inferred about the
 * value, without emitting a single instruction.
 */
#define ALREADY_CHECKED(p) do { asm("" : "=rm"(p) : "0"(p)); } while (0)
88 
/* same as above but to be used to pass the input value to the output but
 * without letting the compiler know about its initial properties. Evaluates
 * <v> exactly once; the GNU statement expression yields the laundered copy.
 */
#define DISGUISE(v) ({ typeof(v) __v = (v); ALREADY_CHECKED(__v); __v; })
93 
/* Implements a static event counter where it's used. This is typically made to
 * report some warnings only once, either during boot or at runtime. It only
 * returns true on the very first call, and zero later. It's thread-safe and
 * uses a single byte of memory per call place. It relies on the atomic xchg
 * defined in atomic.h which is also part of the common API. Note that the
 * static __cnt byte is distinct per expansion site, so each use of the macro
 * tracks its own "first time".
 */
#define ONLY_ONCE() ({ static char __cnt; !_HA_ATOMIC_XCHG(&__cnt, 1); })
101 
/*
 * Gcc >= 3 provides the ability for the program to give hints to the
 * compiler about what branch of an if is most likely to be taken. This
 * helps the compiler produce the most compact critical paths, which is
 * generally better for the cache and to reduce the number of jumps.
 * The "(x) != 0" normalization lets the hint work with pointers and
 * arbitrary integers, not just booleans. On pre-3 gcc or non-gcc
 * compilers, __builtin_expect() itself is defined away so code calling
 * it directly still builds.
 */
#if !defined(likely)
#if !defined(__GNUC__) || (__GNUC__ < 3)
#define __builtin_expect(x,y) (x)
#define likely(x) (x)
#define unlikely(x) (x)
#else
#define likely(x) (__builtin_expect((x) != 0, 1))
#define unlikely(x) (__builtin_expect((x) != 0, 0))
#endif
#endif
118 
/* __GNUC_PREREQ__(major, minor) is true when the compiler is gcc of at least
 * the given version. It is always false for non-gcc compilers, and for icc
 * which defines __GNUC__ without matching gcc's feature set. The inner "&&"
 * term is explicitly parenthesized: it binds tighter than "||" anyway, but
 * the bare form triggers -Wparentheses in every file including this header.
 */
#ifndef __GNUC_PREREQ__
#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
#define __GNUC_PREREQ__(ma, mi) \
        (__GNUC__ > (ma) || (__GNUC__ == (ma) && __GNUC_MINOR__ >= (mi)))
#else
#define __GNUC_PREREQ__(ma, mi) 0
#endif
#endif
127 
/* offsetof() of a field within a struct: uses the compiler builtin when
 * available (gcc >= 4.1), otherwise the classic null-pointer arithmetic
 * fallback. NOTE(review): the fallback uses size_t and uintptr_t, which this
 * header does not include itself — presumably users include <stddef.h> /
 * <stdint.h> first; confirm against callers.
 */
#ifndef offsetof
#if __GNUC_PREREQ__(4, 1)
#define offsetof(type, field)  __builtin_offsetof(type, field)
#else
#define offsetof(type, field) \
        ((size_t)(uintptr_t)((const volatile void *)&((type *)0)->field))
#endif
#endif
136 
/* Some architectures have a double-word CAS, sometimes even dual-8 bytes.
 * Some architectures support unaligned accesses, others are fine with them
 * but only for non-atomic operations. Also mention those supporting unaligned
 * accesses and being little endian, and those where unaligned accesses are
 * known to be fast (almost as fast as aligned ones).
 * Flags defined below:
 *   HA_UNALIGNED        : unaligned accesses are supported
 *   HA_UNALIGNED_LE     : ... and the arch is little endian
 *   HA_UNALIGNED_LE64   : ... including 64-bit little-endian accesses
 *   HA_UNALIGNED_FAST   : unaligned accesses are almost as fast as aligned
 *   HA_UNALIGNED_ATOMIC : unaligned accesses are OK even for atomic ops
 *   HA_HAVE_CAS_DW      : a double-word CAS is available
 *   HA_CAS_IS_8B        : the double-word CAS operates on two 8-byte words
 */
#if defined(__x86_64__)
#define HA_UNALIGNED
#define HA_UNALIGNED_LE
#define HA_UNALIGNED_LE64
#define HA_UNALIGNED_FAST
#define HA_UNALIGNED_ATOMIC
#define HA_HAVE_CAS_DW
#define HA_CAS_IS_8B
#elif defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__)
#define HA_UNALIGNED
#define HA_UNALIGNED_LE
#define HA_UNALIGNED_ATOMIC
#elif defined (__aarch64__) || defined(__ARM_ARCH_8A)
#define HA_UNALIGNED
#define HA_UNALIGNED_LE
#define HA_UNALIGNED_LE64
#define HA_UNALIGNED_FAST
#define HA_HAVE_CAS_DW
#define HA_CAS_IS_8B
#elif defined(__arm__) && (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__))
#define HA_UNALIGNED
#define HA_UNALIGNED_LE
#define HA_UNALIGNED_FAST
#define HA_HAVE_CAS_DW
#endif
168 
169 
/* sets alignment for current field or variable, e.g. "int x ALIGNED(16);" */
#ifndef ALIGNED
#define ALIGNED(x) __attribute__((aligned(x)))
#endif
174 
/* sets alignment only on architectures preventing unaligned accesses
 * (i.e. when HA_UNALIGNED is not set); expands to nothing elsewhere.
 */
#ifndef MAYBE_ALIGNED
#ifndef HA_UNALIGNED
#define MAYBE_ALIGNED(x)  ALIGNED(x)
#else
#define MAYBE_ALIGNED(x)
#endif
#endif
183 
/* sets alignment only on architectures preventing unaligned atomic accesses
 * (i.e. when HA_UNALIGNED_ATOMIC is not set); expands to nothing elsewhere.
 */
#ifndef ATOMIC_ALIGNED
#ifndef HA_UNALIGNED_ATOMIC
#define ATOMIC_ALIGNED(x)  ALIGNED(x)
#else
#define ATOMIC_ALIGNED(x)
#endif
#endif
192 
/* sets alignment for current field or variable only when threads are enabled
 * (USE_THREAD). Typically used to respect cache line alignment to avoid
 * false sharing; costs nothing on single-threaded builds.
 */
#ifndef THREAD_ALIGNED
#ifdef USE_THREAD
#define THREAD_ALIGNED(x) __attribute__((aligned(x)))
#else
#define THREAD_ALIGNED(x)
#endif
#endif
203 
/* add a mandatory alignment for next fields in a structure. The anonymous
 * empty union occupies no space but carries the alignment constraint over
 * to whatever member follows it.
 */
#ifndef ALWAYS_ALIGN
#define ALWAYS_ALIGN(x)  union { } ALIGNED(x)
#endif
208 
/* add an optional alignment for next fields in a structure, only for archs
 * which do not support unaligned accesses (HA_UNALIGNED not set); expands
 * to nothing elsewhere.
 */
#ifndef MAYBE_ALIGN
#ifndef HA_UNALIGNED
#define MAYBE_ALIGN(x)  union { } ALIGNED(x)
#else
#define MAYBE_ALIGN(x)
#endif
#endif
219 
/* add an optional alignment for next fields in a structure, only for archs
 * which do not support unaligned accesses for atomic operations
 * (HA_UNALIGNED_ATOMIC not set); expands to nothing elsewhere.
 */
#ifndef ATOMIC_ALIGN
#ifndef HA_UNALIGNED_ATOMIC
#define ATOMIC_ALIGN(x)  union { } ALIGNED(x)
#else
#define ATOMIC_ALIGN(x)
#endif
#endif
230 
/* add an optional alignment for next fields in a structure, only when threads
 * are enabled (USE_THREAD). Typically used to respect cache line alignment
 * to avoid false sharing; costs nothing on single-threaded builds.
 */
#ifndef THREAD_ALIGN
#ifdef USE_THREAD
#define THREAD_ALIGN(x) union { } ALIGNED(x)
#else
#define THREAD_ALIGN(x)
#endif
#endif
242 
/* The THREAD_LOCAL type attribute defines thread-local storage and is defined
 * to __thread (the gcc/clang TLS storage-class keyword) when threads are
 * enabled, or empty when disabled.
 */
#ifdef USE_THREAD
#define THREAD_LOCAL __thread
#else
#define THREAD_LOCAL
#endif
251 
/* The __decl_thread() statement shows its argument when threads are enabled
 * and hides it when disabled. The purpose is to condition the presence of some
 * variables or struct members to the fact that threads are enabled, without
 * having to enclose them inside a #ifdef USE_THREAD/#endif clause.
 */
#ifdef USE_THREAD
#define __decl_thread(decl) decl
#else
#define __decl_thread(decl)
#endif
262 
/* clang has a __has_feature() macro which reports true/false on a number of
 * internally supported features. Let's make sure this macro is always defined
 * and returns zero when not supported, so that feature tests can be written
 * unconditionally even under gcc and other compilers.
 */
#ifndef __has_feature
#define __has_feature(x) 0
#endif
270 
271 #endif /* _HAPROXY_COMPILER_H */
272