// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_UTILS_H
#define VIXL_UTILS_H

#include <string.h>
#include <cmath>
#include "vixl/globals.h"
#include "vixl/compiler-intrinsics.h"

namespace vixl {

// Macros for compile-time format checking.
#if GCC_VERSION_OR_NEWER(4, 4, 0)
#define PRINTF_CHECK(format_index, varargs_index) \
  __attribute__((format(gnu_printf, format_index, varargs_index)))
#else
#define PRINTF_CHECK(format_index, varargs_index)
#endif

#ifndef INT64_C
#define INT32_C(c) c
#define INT64_C(c) (c ## LL)
#define UINT32_C(c) (c ## U)
#define UINT64_C(c) (c ## ULL)
#endif

// Check number width.
inline bool is_intn(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  int64_t limit = INT64_C(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}

inline bool is_uintn(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  return !(x >> n);
}

inline uint32_t truncate_to_intn(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  return static_cast<uint32_t>(x & ((INT64_C(1) << n) - 1));
}
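// Illustrative examples (informal, not exhaustive): is_intn(8, 127) is true
// and is_intn(8, 128) is false, since the signed 8-bit range is [-128, 127];
// is_uintn(4, 15) is true and is_uintn(4, 16) is false; and
// truncate_to_intn(8, 0x1234) yields 0x34, keeping only the low 8 bits.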

#define INT_1_TO_63_LIST(V)                                                    \
V(1)  V(2)  V(3)  V(4)  V(5)  V(6)  V(7)  V(8)                                 \
V(9)  V(10) V(11) V(12) V(13) V(14) V(15) V(16)                                \
V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24)                                \
V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32)                                \
V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40)                                \
V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48)                                \
V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56)                                \
V(57) V(58) V(59) V(60) V(61) V(62) V(63)

#define DECLARE_IS_INT_N(N)                                                    \
inline bool is_int##N(int64_t x) { return is_intn(N, x); }
#define DECLARE_IS_UINT_N(N)                                                   \
inline bool is_uint##N(int64_t x) { return is_uintn(N, x); }
#define DECLARE_TRUNCATE_TO_INT_N(N)                                           \
inline uint32_t truncate_to_int##N(int x) { return truncate_to_intn(N, x); }
INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
#undef DECLARE_IS_INT_N
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_INT_N
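// The macros above expand to helpers named by width, such as is_int8(),
// is_uint12() and truncate_to_int16(). For example, is_uint12(0xfff) is true
// (the largest 12-bit unsigned immediate), while is_uint12(0x1000) is false.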

// Bit field extraction.
inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
  return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
}

inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
  return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
}

inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
  return (x << (31 - msb)) >> (lsb + 31 - msb);
}

inline int64_t signed_bitextract_64(int msb, int lsb, int64_t x) {
  return (x << (63 - msb)) >> (lsb + 63 - msb);
}
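// For example, unsigned_bitextract_32(7, 4, 0xab) extracts bits 7..4 and
// yields 0xa, while signed_bitextract_32(7, 4, 0xab) sign-extends the same
// four bits (0b1010) and yields -6.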

// Floating point representation.
uint32_t float_to_rawbits(float value);
uint64_t double_to_rawbits(double value);
float rawbits_to_float(uint32_t bits);
double rawbits_to_double(uint64_t bits);

uint32_t float_sign(float val);
uint32_t float_exp(float val);
uint32_t float_mantissa(float val);
uint32_t double_sign(double val);
uint32_t double_exp(double val);
uint64_t double_mantissa(double val);

float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa);
double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa);
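// For reference, float_to_rawbits(1.0f) is 0x3f800000 and
// double_to_rawbits(1.0) is 0x3ff0000000000000. Assuming float_pack() takes
// the raw field values (sign, biased exponent, mantissa), float_pack(0, 0x7f, 0)
// would rebuild 1.0f.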

// An fpclassify() function for 16-bit half-precision floats.
int float16classify(float16 value);

// NaN tests.
inline bool IsSignallingNaN(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  uint64_t raw = double_to_rawbits(num);
  if (std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
    return true;
  }
  return false;
}


inline bool IsSignallingNaN(float num) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  uint32_t raw = float_to_rawbits(num);
  if (std::isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
    return true;
  }
  return false;
}


inline bool IsSignallingNaN(float16 num) {
  const uint16_t kFP16QuietNaNMask = 0x0200;
  return (float16classify(num) == FP_NAN) &&
         ((num & kFP16QuietNaNMask) == 0);
}
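// The quiet-NaN masks above select the most significant fraction bit: bit 51
// for IEEE 754 binary64, bit 22 for binary32 and bit 9 for binary16. A NaN
// with that bit clear is signalling; for example, the double with raw bits
// 0x7ff0000000000001 is a signalling NaN, while 0x7ff8000000000000 is quiet.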


template <typename T>
inline bool IsQuietNaN(T num) {
  return std::isnan(num) && !IsSignallingNaN(num);
}


// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  VIXL_ASSERT(std::isnan(num));
  return rawbits_to_double(double_to_rawbits(num) | kFP64QuietNaNMask);
}


inline float ToQuietNaN(float num) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  VIXL_ASSERT(std::isnan(num));
  return rawbits_to_float(float_to_rawbits(num) | kFP32QuietNaNMask);
}


// Fused multiply-add.
inline double FusedMultiplyAdd(double op1, double op2, double a) {
  return fma(op1, op2, a);
}


inline float FusedMultiplyAdd(float op1, float op2, float a) {
  return fmaf(op1, op2, a);
}


inline uint64_t LowestSetBit(uint64_t value) {
  return value & -value;
}


template<typename T>
inline int HighestSetBitPosition(T value) {
  VIXL_ASSERT(value != 0);
  return (sizeof(value) * 8 - 1) - CountLeadingZeros(value);
}


template<typename V>
inline int WhichPowerOf2(V value) {
  VIXL_ASSERT(IsPowerOf2(value));
  return CountTrailingZeros(value);
}
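// Illustrative values: LowestSetBit(0b10100) is 0b00100,
// HighestSetBitPosition(0b10100) is 4, and WhichPowerOf2(8) is 3.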


unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);


template <typename T>
T ReverseBits(T value) {
  VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
              (sizeof(value) == 4) || (sizeof(value) == 8));
  T result = 0;
  for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
    result = (result << 1) | (value & 1);
    value >>= 1;
  }
  return result;
}
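// For example, ReverseBits(static_cast<uint8_t>(0x01)) is 0x80, and
// ReverseBits(UINT32_C(0x80000000)) is 0x00000001.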


template <typename T>
T ReverseBytes(T value, int block_bytes_log2) {
  VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8));
  VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value));
  // Split the 64-bit value into an array of 8 bytes, where bytes[0] is the
  // least significant byte and bytes[7] is the most significant.
  uint8_t bytes[8];
  uint64_t mask = UINT64_C(0xff00000000000000);
  for (int i = 7; i >= 0; i--) {
    bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
    mask >>= 8;
  }

  // Permutation tables for REV instructions.
  //  permute_table[0] is used by REV16_x, REV16_w
  //  permute_table[1] is used by REV32_x, REV_w
  //  permute_table[2] is used by REV_x
  VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4));
  static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
                                               {4, 5, 6, 7, 0, 1, 2, 3},
                                               {0, 1, 2, 3, 4, 5, 6, 7} };
  T result = 0;
  for (int i = 0; i < 8; i++) {
    result <<= 8;
    result |= bytes[permute_table[block_bytes_log2 - 1][i]];
  }
  return result;
}
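// For example, with a uint32_t value of 0x12345678, ReverseBytes(value, 1)
// gives 0x34127856 (bytes swapped within each halfword, as REV16 would), and
// ReverseBytes(value, 2) gives 0x78563412 (a full 32-bit byte reverse, as
// REV_w would).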


// Pointer alignment
// TODO: rename/refactor to make it specific to instructions.
template<typename T>
bool IsWordAligned(T pointer) {
  VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t));   // NOLINT(runtime/sizeof)
  return ((intptr_t)(pointer) & 3) == 0;
}

// Increment a pointer (up to 64 bits) until it has the specified alignment.
template<class T>
T AlignUp(T pointer, size_t alignment) {
  // Use C-style casts to get static_cast behaviour for integral types (T), and
  // reinterpret_cast behaviour for other types.

  uint64_t pointer_raw = (uint64_t)pointer;
  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));

  size_t align_step = (alignment - pointer_raw) % alignment;
  VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);

  return (T)(pointer_raw + align_step);
}

// Decrement a pointer (up to 64 bits) until it has the specified alignment.
template<class T>
T AlignDown(T pointer, size_t alignment) {
  // Use C-style casts to get static_cast behaviour for integral types (T), and
  // reinterpret_cast behaviour for other types.

  uint64_t pointer_raw = (uint64_t)pointer;
  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));

  size_t align_step = pointer_raw % alignment;
  VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);

  return (T)(pointer_raw - align_step);
}
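// For example, with a 16-byte alignment, AlignUp maps an address of 0x1005 to
// 0x1010 (and leaves 0x1010 unchanged), while AlignDown maps 0x1005 down to
// 0x1000.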

}  // namespace vixl

#endif  // VIXL_UTILS_H