1 // Copyright 2015, ARM Limited
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 // * Redistributions of source code must retain the above copyright notice,
8 // this list of conditions and the following disclaimer.
9 // * Redistributions in binary form must reproduce the above copyright notice,
10 // this list of conditions and the following disclaimer in the documentation
11 // and/or other materials provided with the distribution.
12 // * Neither the name of ARM Limited nor the names of its contributors may be
13 // used to endorse or promote products derived from this software without
14 // specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27 #include "jit/arm64/vixl/Instructions-vixl.h"
28
29 #include "jit/arm64/vixl/Assembler-vixl.h"
30
31 namespace vixl {
32
33
// Floating-point infinity values.
// float16 values are raw IEEE 754 half-precision bit patterns:
// sign | exponent all-ones | zero mantissa.
const float16 kFP16PositiveInfinity = 0x7c00;
const float16 kFP16NegativeInfinity = 0xfc00;
const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
const double kFP64PositiveInfinity =
    rawbits_to_double(UINT64_C(0x7ff0000000000000));
const double kFP64NegativeInfinity =
    rawbits_to_double(UINT64_C(0xfff0000000000000));


// The default NaN values (for FPCR.DN=1).
// These are quiet NaNs: exponent all-ones, most significant mantissa bit set,
// remaining mantissa bits clear, positive sign.
const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);
const float16 kFP16DefaultNaN = 0x7e00;
49
50
RotateRight(uint64_t value,unsigned int rotate,unsigned int width)51 static uint64_t RotateRight(uint64_t value,
52 unsigned int rotate,
53 unsigned int width) {
54 VIXL_ASSERT(width <= 64);
55 rotate &= 63;
56 return ((value & ((UINT64_C(1) << rotate) - 1)) <<
57 (width - rotate)) | (value >> rotate);
58 }
59
60
RepeatBitsAcrossReg(unsigned reg_size,uint64_t value,unsigned width)61 static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
62 uint64_t value,
63 unsigned width) {
64 VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
65 (width == 32));
66 VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
67 uint64_t result = value & ((UINT64_C(1) << width) - 1);
68 for (unsigned i = width; i < reg_size; i *= 2) {
69 result |= (result << i);
70 }
71 return result;
72 }
73
74
IsLoad() const75 bool Instruction::IsLoad() const {
76 if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
77 return false;
78 }
79
80 if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
81 return Mask(LoadStorePairLBit) != 0;
82 } else {
83 LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
84 switch (op) {
85 case LDRB_w:
86 case LDRH_w:
87 case LDR_w:
88 case LDR_x:
89 case LDRSB_w:
90 case LDRSB_x:
91 case LDRSH_w:
92 case LDRSH_x:
93 case LDRSW_x:
94 case LDR_b:
95 case LDR_h:
96 case LDR_s:
97 case LDR_d:
98 case LDR_q: return true;
99 default: return false;
100 }
101 }
102 }
103
104
IsStore() const105 bool Instruction::IsStore() const {
106 if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
107 return false;
108 }
109
110 if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
111 return Mask(LoadStorePairLBit) == 0;
112 } else {
113 LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
114 switch (op) {
115 case STRB_w:
116 case STRH_w:
117 case STR_w:
118 case STR_x:
119 case STR_b:
120 case STR_h:
121 case STR_s:
122 case STR_d:
123 case STR_q: return true;
124 default: return false;
125 }
126 }
127 }
128
129
// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() const {
  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
  int32_t n = BitN();
  int32_t imm_s = ImmSetBits();
  int32_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //  (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //

  if (n == 1) {
    // 64-bit element: S + 1 set bits rotated right by R.
    if (imm_s == 0x3f) {
      // All s bits set is a reserved encoding (would encode ~0, i.e. zero
      // pattern after inversion) - report failure.
      return 0;
    }
    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    // N == 0: the position of the highest clear bit of imm_s (scanned from
    // bit 5 downwards) selects the element size: 32, 16, 8, 4 or 2 bits.
    if ((imm_s >> 1) == 0x1f) {
      // imm_s of the form 11111x has no valid size prefix - failure.
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          // s bits all set within the element - reserved, report failure.
          return 0;
        }
        // Build the element pattern, rotate it, then tile it across the
        // destination register width.
        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  VIXL_UNREACHABLE();
  return 0;
}
182
183
ImmNEONabcdefgh() const184 uint32_t Instruction::ImmNEONabcdefgh() const {
185 return ImmNEONabc() << 5 | ImmNEONdefgh();
186 }
187
188
Imm8ToFP32(uint32_t imm8)189 float Instruction::Imm8ToFP32(uint32_t imm8) {
190 // Imm8: abcdefgh (8 bits)
191 // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
192 // where B is b ^ 1
193 uint32_t bits = imm8;
194 uint32_t bit7 = (bits >> 7) & 0x1;
195 uint32_t bit6 = (bits >> 6) & 0x1;
196 uint32_t bit5_to_0 = bits & 0x3f;
197 uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
198
199 return rawbits_to_float(result);
200 }
201
202
ImmFP32() const203 float Instruction::ImmFP32() const {
204 return Imm8ToFP32(ImmFP());
205 }
206
207
Imm8ToFP64(uint32_t imm8)208 double Instruction::Imm8ToFP64(uint32_t imm8) {
209 // Imm8: abcdefgh (8 bits)
210 // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
211 // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
212 // where B is b ^ 1
213 uint32_t bits = imm8;
214 uint64_t bit7 = (bits >> 7) & 0x1;
215 uint64_t bit6 = (bits >> 6) & 0x1;
216 uint64_t bit5_to_0 = bits & 0x3f;
217 uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
218
219 return rawbits_to_double(result);
220 }
221
222
ImmFP64() const223 double Instruction::ImmFP64() const {
224 return Imm8ToFP64(ImmFP());
225 }
226
227
ImmNEONFP32() const228 float Instruction::ImmNEONFP32() const {
229 return Imm8ToFP32(ImmNEONabcdefgh());
230 }
231
232
ImmNEONFP64() const233 double Instruction::ImmNEONFP64() const {
234 return Imm8ToFP64(ImmNEONabcdefgh());
235 }
236
237
CalcLSDataSize(LoadStoreOp op)238 unsigned CalcLSDataSize(LoadStoreOp op) {
239 VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
240 unsigned size = static_cast<Instr>(op) >> LSSize_offset;
241 if ((op & LSVector_mask) != 0) {
242 // Vector register memory operations encode the access size in the "size"
243 // and "opc" fields.
244 if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
245 size = kQRegSizeInBytesLog2;
246 }
247 }
248 return size;
249 }
250
251
CalcLSPairDataSize(LoadStorePairOp op)252 unsigned CalcLSPairDataSize(LoadStorePairOp op) {
253 VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
254 VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
255 switch (op) {
256 case STP_q:
257 case LDP_q: return kQRegSizeInBytesLog2;
258 case STP_x:
259 case LDP_x:
260 case STP_d:
261 case LDP_d: return kXRegSizeInBytesLog2;
262 default: return kWRegSizeInBytesLog2;
263 }
264 }
265
266
ImmBranchRangeBitwidth(ImmBranchType branch_type)267 int Instruction::ImmBranchRangeBitwidth(ImmBranchType branch_type) {
268 switch (branch_type) {
269 case UncondBranchType:
270 return ImmUncondBranch_width;
271 case CondBranchType:
272 return ImmCondBranch_width;
273 case CompareBranchType:
274 return ImmCmpBranch_width;
275 case TestBranchType:
276 return ImmTestBranch_width;
277 default:
278 VIXL_UNREACHABLE();
279 return 0;
280 }
281 }
282
283
ImmBranchForwardRange(ImmBranchType branch_type)284 int32_t Instruction::ImmBranchForwardRange(ImmBranchType branch_type) {
285 int32_t encoded_max = 1 << (ImmBranchRangeBitwidth(branch_type) - 1);
286 return encoded_max * kInstructionSize;
287 }
288
289
IsValidImmPCOffset(ImmBranchType branch_type,int64_t offset)290 bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
291 int64_t offset) {
292 return is_intn(ImmBranchRangeBitwidth(branch_type), offset);
293 }
294
ImmBranchTypeToRange(ImmBranchType branch_type)295 ImmBranchRangeType Instruction::ImmBranchTypeToRange(ImmBranchType branch_type)
296 {
297 switch (branch_type) {
298 case UncondBranchType:
299 return UncondBranchRangeType;
300 case CondBranchType:
301 case CompareBranchType:
302 return CondBranchRangeType;
303 case TestBranchType:
304 return TestBranchRangeType;
305 default:
306 return UnknownBranchRangeType;
307 }
308 }
309
ImmBranchMaxForwardOffset(ImmBranchRangeType range_type)310 int32_t Instruction::ImmBranchMaxForwardOffset(ImmBranchRangeType range_type)
311 {
312 // Branches encode a pc-relative two's complement number of 32-bit
313 // instructions. Compute the number of bytes corresponding to the largest
314 // positive number of instructions that can be encoded.
315 switch(range_type) {
316 case TestBranchRangeType:
317 return ((1 << ImmTestBranch_width) - 1) / 2 * kInstructionSize;
318 case CondBranchRangeType:
319 return ((1 << ImmCondBranch_width) - 1) / 2 * kInstructionSize;
320 case UncondBranchRangeType:
321 return ((1 << ImmUncondBranch_width) - 1) / 2 * kInstructionSize;
322 default:
323 VIXL_UNREACHABLE();
324 return 0;
325 }
326 }
327
ImmBranchMinBackwardOffset(ImmBranchRangeType range_type)328 int32_t Instruction::ImmBranchMinBackwardOffset(ImmBranchRangeType range_type)
329 {
330 switch(range_type) {
331 case TestBranchRangeType:
332 return -int32_t(1 << ImmTestBranch_width) / 2 * kInstructionSize;
333 case CondBranchRangeType:
334 return -int32_t(1 << ImmCondBranch_width) / 2 * kInstructionSize;
335 case UncondBranchRangeType:
336 return -int32_t(1 << ImmUncondBranch_width) / 2 * kInstructionSize;
337 default:
338 VIXL_UNREACHABLE();
339 return 0;
340 }
341 }
342
// Compute the target address of this pc-relative instruction (ADR, ADRP or
// any immediate branch).
const Instruction* Instruction::ImmPCOffsetTarget() const {
  const Instruction * base = this;
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // ADR and ADRP.
    offset = ImmPCRel();
    if (Mask(PCRelAddressingMask) == ADRP) {
      // ADRP is page-based: the base is the start of this instruction's
      // page and the immediate counts whole pages.
      base = AlignDown(base, kPageSize);
      offset *= kPageSize;
    } else {
      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
    }
  } else {
    // All PC-relative branches.
    VIXL_ASSERT(BranchType() != UnknownBranchType);
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  }
  // NOTE(review): `base + offset` relies on Instruction pointer arithmetic
  // being byte-granular (i.e. sizeof(Instruction) == 1) - confirm against
  // the class definition in the header.
  return base + offset;
}
363
364
ImmBranch() const365 int Instruction::ImmBranch() const {
366 switch (BranchType()) {
367 case CondBranchType: return ImmCondBranch();
368 case UncondBranchType: return ImmUncondBranch();
369 case CompareBranchType: return ImmCmpBranch();
370 case TestBranchType: return ImmTestBranch();
371 default: VIXL_UNREACHABLE();
372 }
373 return 0;
374 }
375
376
SetImmPCOffsetTarget(const Instruction * target)377 void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
378 if (IsPCRelAddressing()) {
379 SetPCRelImmTarget(target);
380 } else {
381 SetBranchImmTarget(target);
382 }
383 }
384
385
SetPCRelImmTarget(const Instruction * target)386 void Instruction::SetPCRelImmTarget(const Instruction* target) {
387 ptrdiff_t imm21;
388 if ((Mask(PCRelAddressingMask) == ADR)) {
389 imm21 = target - this;
390 } else {
391 VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
392 uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
393 uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
394 imm21 = target_page - this_page;
395 }
396 Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21));
397
398 SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
399 }
400
401
SetBranchImmTarget(const Instruction * target)402 void Instruction::SetBranchImmTarget(const Instruction* target) {
403 VIXL_ASSERT(((target - this) & 3) == 0);
404 Instr branch_imm = 0;
405 uint32_t imm_mask = 0;
406 int offset = static_cast<int>((target - this) >> kInstructionSizeLog2);
407 switch (BranchType()) {
408 case CondBranchType: {
409 branch_imm = Assembler::ImmCondBranch(offset);
410 imm_mask = ImmCondBranch_mask;
411 break;
412 }
413 case UncondBranchType: {
414 branch_imm = Assembler::ImmUncondBranch(offset);
415 imm_mask = ImmUncondBranch_mask;
416 break;
417 }
418 case CompareBranchType: {
419 branch_imm = Assembler::ImmCmpBranch(offset);
420 imm_mask = ImmCmpBranch_mask;
421 break;
422 }
423 case TestBranchType: {
424 branch_imm = Assembler::ImmTestBranch(offset);
425 imm_mask = ImmTestBranch_mask;
426 break;
427 }
428 default: VIXL_UNREACHABLE();
429 }
430 SetInstructionBits(Mask(~imm_mask) | branch_imm);
431 }
432
433
SetImmLLiteral(const Instruction * source)434 void Instruction::SetImmLLiteral(const Instruction* source) {
435 VIXL_ASSERT(IsWordAligned(source));
436 ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
437 Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset));
438 Instr mask = ImmLLiteral_mask;
439
440 SetInstructionBits(Mask(~mask) | imm);
441 }
442
443
// Halve the lane size, keeping the lane count (e.g. 4S -> 4H, S -> H).
VectorFormat VectorFormatHalfWidth(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
              vform == kFormatH || vform == kFormatS || vform == kFormatD);
  switch (vform) {
    // Scalar formats.
    case kFormatH: return kFormatB;
    case kFormatS: return kFormatH;
    case kFormatD: return kFormatS;
    // Vector formats.
    case kFormat8H: return kFormat8B;
    case kFormat4S: return kFormat4H;
    case kFormat2D: return kFormat2S;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}
457
458
// Double the lane size, keeping the lane count (e.g. 4H -> 4S, H -> S).
VectorFormat VectorFormatDoubleWidth(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
              vform == kFormatB || vform == kFormatH || vform == kFormatS);
  switch (vform) {
    // Scalar formats.
    case kFormatB: return kFormatH;
    case kFormatH: return kFormatS;
    case kFormatS: return kFormatD;
    // Vector formats.
    case kFormat8B: return kFormat8H;
    case kFormat4H: return kFormat4S;
    case kFormat2S: return kFormat2D;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}
472
473
// Widen the format to the full Q-register variant with the same lane size.
VectorFormat VectorFormatFillQ(const VectorFormat vform) {
  switch (vform) {
    case kFormatD:
    case kFormat1D:
    case kFormat2D:
      return kFormat2D;
    case kFormatS:
    case kFormat2S:
    case kFormat4S:
      return kFormat4S;
    case kFormatH:
    case kFormat4H:
    case kFormat8H:
      return kFormat8H;
    case kFormatB:
    case kFormat8B:
    case kFormat16B:
      return kFormat16B;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}
491
// Halve the lane size and double the lane count, keeping the total register
// width (e.g. 2D -> 4S, 4H -> 8B).
VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform) {
  switch (vform) {
    case kFormat2D: return kFormat4S;
    case kFormat1D: return kFormat2S;
    case kFormat4S: return kFormat8H;
    case kFormat2S: return kFormat4H;
    case kFormat8H: return kFormat16B;
    case kFormat4H: return kFormat8B;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}
503
// Double the lane count, keeping the lane size (D-half -> full Q register).
VectorFormat VectorFormatDoubleLanes(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
  switch (vform) {
    case kFormat2S: return kFormat4S;
    case kFormat4H: return kFormat8H;
    case kFormat8B: return kFormat16B;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}
513
514
// Halve the lane count, keeping the lane size (full Q register -> D-half).
VectorFormat VectorFormatHalfLanes(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
  switch (vform) {
    case kFormat4S: return kFormat2S;
    case kFormat8H: return kFormat4H;
    case kFormat16B: return kFormat8B;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}
524
525
ScalarFormatFromLaneSize(int laneSize)526 VectorFormat ScalarFormatFromLaneSize(int laneSize) {
527 switch (laneSize) {
528 case 8: return kFormatB;
529 case 16: return kFormatH;
530 case 32: return kFormatS;
531 case 64: return kFormatD;
532 default: VIXL_UNREACHABLE(); return kFormatUndefined;
533 }
534 }
535
536
RegisterSizeInBitsFromFormat(VectorFormat vform)537 unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
538 VIXL_ASSERT(vform != kFormatUndefined);
539 switch (vform) {
540 case kFormatB: return kBRegSize;
541 case kFormatH: return kHRegSize;
542 case kFormatS: return kSRegSize;
543 case kFormatD: return kDRegSize;
544 case kFormat8B:
545 case kFormat4H:
546 case kFormat2S:
547 case kFormat1D: return kDRegSize;
548 default: return kQRegSize;
549 }
550 }
551
552
RegisterSizeInBytesFromFormat(VectorFormat vform)553 unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
554 return RegisterSizeInBitsFromFormat(vform) / 8;
555 }
556
557
LaneSizeInBitsFromFormat(VectorFormat vform)558 unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
559 VIXL_ASSERT(vform != kFormatUndefined);
560 switch (vform) {
561 case kFormatB:
562 case kFormat8B:
563 case kFormat16B: return 8;
564 case kFormatH:
565 case kFormat4H:
566 case kFormat8H: return 16;
567 case kFormatS:
568 case kFormat2S:
569 case kFormat4S: return 32;
570 case kFormatD:
571 case kFormat1D:
572 case kFormat2D: return 64;
573 default: VIXL_UNREACHABLE(); return 0;
574 }
575 }
576
577
LaneSizeInBytesFromFormat(VectorFormat vform)578 int LaneSizeInBytesFromFormat(VectorFormat vform) {
579 return LaneSizeInBitsFromFormat(vform) / 8;
580 }
581
582
LaneSizeInBytesLog2FromFormat(VectorFormat vform)583 int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
584 VIXL_ASSERT(vform != kFormatUndefined);
585 switch (vform) {
586 case kFormatB:
587 case kFormat8B:
588 case kFormat16B: return 0;
589 case kFormatH:
590 case kFormat4H:
591 case kFormat8H: return 1;
592 case kFormatS:
593 case kFormat2S:
594 case kFormat4S: return 2;
595 case kFormatD:
596 case kFormat1D:
597 case kFormat2D: return 3;
598 default: VIXL_UNREACHABLE(); return 0;
599 }
600 }
601
602
LaneCountFromFormat(VectorFormat vform)603 int LaneCountFromFormat(VectorFormat vform) {
604 VIXL_ASSERT(vform != kFormatUndefined);
605 switch (vform) {
606 case kFormat16B: return 16;
607 case kFormat8B:
608 case kFormat8H: return 8;
609 case kFormat4H:
610 case kFormat4S: return 4;
611 case kFormat2S:
612 case kFormat2D: return 2;
613 case kFormat1D:
614 case kFormatB:
615 case kFormatH:
616 case kFormatS:
617 case kFormatD: return 1;
618 default: VIXL_UNREACHABLE(); return 0;
619 }
620 }
621
622
MaxLaneCountFromFormat(VectorFormat vform)623 int MaxLaneCountFromFormat(VectorFormat vform) {
624 VIXL_ASSERT(vform != kFormatUndefined);
625 switch (vform) {
626 case kFormatB:
627 case kFormat8B:
628 case kFormat16B: return 16;
629 case kFormatH:
630 case kFormat4H:
631 case kFormat8H: return 8;
632 case kFormatS:
633 case kFormat2S:
634 case kFormat4S: return 4;
635 case kFormatD:
636 case kFormat1D:
637 case kFormat2D: return 2;
638 default: VIXL_UNREACHABLE(); return 0;
639 }
640 }
641
642
643 // Does 'vform' indicate a vector format or a scalar format?
IsVectorFormat(VectorFormat vform)644 bool IsVectorFormat(VectorFormat vform) {
645 VIXL_ASSERT(vform != kFormatUndefined);
646 switch (vform) {
647 case kFormatB:
648 case kFormatH:
649 case kFormatS:
650 case kFormatD: return false;
651 default: return true;
652 }
653 }
654
655
MaxIntFromFormat(VectorFormat vform)656 int64_t MaxIntFromFormat(VectorFormat vform) {
657 return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
658 }
659
660
MinIntFromFormat(VectorFormat vform)661 int64_t MinIntFromFormat(VectorFormat vform) {
662 return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
663 }
664
665
MaxUintFromFormat(VectorFormat vform)666 uint64_t MaxUintFromFormat(VectorFormat vform) {
667 return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
668 }
669 } // namespace vixl
670
671