/* -*- mode: C; c-basic-offset: 3; -*- */

/*---------------------------------------------------------------*/
/*--- Begin                                       main_main.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "libvex.h"
#include "libvex_emnote.h"
#include "libvex_guest_x86.h"
#include "libvex_guest_amd64.h"
#include "libvex_guest_arm.h"
#include "libvex_guest_arm64.h"
#include "libvex_guest_ppc32.h"
#include "libvex_guest_ppc64.h"
#include "libvex_guest_s390x.h"
#include "libvex_guest_mips32.h"
#include "libvex_guest_mips64.h"

#include "main_globals.h"
#include "main_util.h"
#include "host_generic_regs.h"
#include "ir_opt.h"

#include "host_x86_defs.h"
#include "host_amd64_defs.h"
#include "host_ppc_defs.h"
#include "host_arm_defs.h"
#include "host_arm64_defs.h"
#include "host_s390_defs.h"
#include "host_mips_defs.h"

#include "guest_generic_bb_to_IR.h"
#include "guest_x86_defs.h"
#include "guest_amd64_defs.h"
#include "guest_arm_defs.h"
#include "guest_arm64_defs.h"
#include "guest_ppc_defs.h"
#include "guest_s390_defs.h"
#include "guest_mips_defs.h"

#include "host_generic_simd128.h"
/* For each architecture <arch>, we define two macros:
   <arch>FN, which takes as argument a pointer (typically to a function
             or the return value of a function).
   <arch>ST, which takes as argument a statement.
   If main_main.c is compiled for <arch>, these macros simply expand to
   their argument.
   Otherwise, they expand to NULL and vassert(0), respectively.
   These macros avoid introducing dependencies on object files that are
   not needed for the (only) architecture we are compiling for.

   To nevertheless compile the code below for all supported
   architectures, define VEXMULTIARCH.  This is used by the file
   multiarch_main_main.c. */

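/* Illustrative sketch (not part of the build): with the definitions
   below, a use site such as

      disInstrFn = X86FN(disInstr_X86);
      X86ST(return chainXDirect_X86(...));

   expands to plain references on a VGA_x86 (or VEXMULTIARCH) build,
   and to "disInstrFn = NULL" and "vassert(0)" on any other build, so
   no x86-only object files are dragged in there. */
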
#if defined(VGA_x86) || defined(VEXMULTIARCH)
#define X86FN(f) f
#define X86ST(f) f
#else
#define X86FN(f) NULL
#define X86ST(f) vassert(0)
#endif

#if defined(VGA_amd64) || defined(VEXMULTIARCH)
#define AMD64FN(f) f
#define AMD64ST(f) f
#else
#define AMD64FN(f) NULL
#define AMD64ST(f) vassert(0)
#endif

#if defined(VGA_ppc32) || defined(VEXMULTIARCH)
#define PPC32FN(f) f
#define PPC32ST(f) f
#else
#define PPC32FN(f) NULL
#define PPC32ST(f) vassert(0)
#endif

#if defined(VGA_ppc64be) || defined(VGA_ppc64le) || defined(VEXMULTIARCH)
#define PPC64FN(f) f
#define PPC64ST(f) f
#else
#define PPC64FN(f) NULL
#define PPC64ST(f) vassert(0)
#endif

#if defined(VGA_s390x) || defined(VEXMULTIARCH)
#define S390FN(f) f
#define S390ST(f) f
#else
#define S390FN(f) NULL
#define S390ST(f) vassert(0)
#endif

#if defined(VGA_arm) || defined(VEXMULTIARCH)
#define ARMFN(f) f
#define ARMST(f) f
#else
#define ARMFN(f) NULL
#define ARMST(f) vassert(0)
#endif

#if defined(VGA_arm64) || defined(VEXMULTIARCH)
#define ARM64FN(f) f
#define ARM64ST(f) f
#else
#define ARM64FN(f) NULL
#define ARM64ST(f) vassert(0)
#endif

#if defined(VGA_mips32) || defined(VEXMULTIARCH)
#define MIPS32FN(f) f
#define MIPS32ST(f) f
#else
#define MIPS32FN(f) NULL
#define MIPS32ST(f) vassert(0)
#endif

#if defined(VGA_mips64) || defined(VEXMULTIARCH)
#define MIPS64FN(f) f
#define MIPS64ST(f) f
#else
#define MIPS64FN(f) NULL
#define MIPS64ST(f) vassert(0)
#endif


/* This file contains the top level interface to the library. */

/* --------- fwds ... --------- */

static void  check_hwcaps ( VexArch arch, UInt hwcaps );
static const HChar* show_hwcaps ( VexArch arch, UInt hwcaps );
static IRType arch_word_size ( VexArch arch );

/* --------- helpers --------- */

__attribute__((noinline))
static UInt udiv32 ( UInt x, UInt y ) { return x/y; }
__attribute__((noinline))
static  Int sdiv32 (  Int x,  Int y ) { return x/y; }


/* --------- Initialise the library. --------- */

/* Exported to library client. */

void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon )
{
   vex_bzero(vcon, sizeof(*vcon));
   vcon->iropt_verbosity                = 0;
   vcon->iropt_level                    = 2;
   vcon->iropt_register_updates_default = VexRegUpdUnwindregsAtMemAccess;
   vcon->iropt_unroll_thresh            = 120;
   vcon->guest_max_insns                = 60;
   vcon->guest_chase_thresh             = 10;
   vcon->guest_chase_cond               = False;
   vcon->regalloc_version               = 3;
}

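/* Illustrative sketch (hypothetical client code, not part of this
   file): a typical client obtains the defaults, optionally tweaks
   them, and then initialises the library exactly once:

      VexControl vcon;
      LibVEX_default_VexControl(&vcon);
      vcon.iropt_level = 1;   // example tweak; must stay in 0..2
      LibVEX_Init(my_failure_exit, my_log_bytes, 0, &vcon);

   where my_failure_exit and my_log_bytes are client-supplied callbacks
   matching the signatures declared below. */
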
/* Exported to library client. */

void LibVEX_Init (
   /* failure exit function */
   __attribute__ ((noreturn))
   void (*failure_exit) ( void ),
   /* logging output function */
   void (*log_bytes) ( const HChar*, SizeT nbytes ),
   /* debug paranoia level */
   Int debuglevel,
   /* Control ... */
   const VexControl* vcon
)
{
   /* First off, do enough minimal setup so that the following
      assertions can fail in a sane fashion, if need be. */
   vex_failure_exit = failure_exit;
   vex_log_bytes    = log_bytes;

   /* Now it's safe to check parameters for sanity. */
   vassert(!vex_initdone);
   vassert(failure_exit);
   vassert(log_bytes);
   vassert(debuglevel >= 0);

   vassert(vcon->iropt_verbosity >= 0);
   vassert(vcon->iropt_level >= 0);
   vassert(vcon->iropt_level <= 2);
   vassert(vcon->iropt_unroll_thresh >= 0);
   vassert(vcon->iropt_unroll_thresh <= 400);
   vassert(vcon->guest_max_insns >= 1);
   vassert(vcon->guest_max_insns <= 100);
   vassert(vcon->guest_chase_thresh >= 0);
   vassert(vcon->guest_chase_thresh < vcon->guest_max_insns);
   vassert(vcon->guest_chase_cond == True
           || vcon->guest_chase_cond == False);
   vassert(vcon->regalloc_version == 2 || vcon->regalloc_version == 3);

   /* Check that Vex has been built with sizes of basic types as
      stated in priv/libvex_basictypes.h.  Failure of any of these is
      a serious configuration error and should be corrected
      immediately.  If any of these assertions fail you can fully
      expect Vex not to work properly, if at all. */

   vassert(1 == sizeof(UChar));
   vassert(1 == sizeof(Char));
   vassert(2 == sizeof(UShort));
   vassert(2 == sizeof(Short));
   vassert(4 == sizeof(UInt));
   vassert(4 == sizeof(Int));
   vassert(8 == sizeof(ULong));
   vassert(8 == sizeof(Long));
   vassert(4 == sizeof(Float));
   vassert(8 == sizeof(Double));
   vassert(1 == sizeof(Bool));
   vassert(4 == sizeof(Addr32));
   vassert(8 == sizeof(Addr64));
   vassert(16 == sizeof(U128));
   vassert(16 == sizeof(V128));
   vassert(32 == sizeof(U256));

   vassert(sizeof(void*) == 4 || sizeof(void*) == 8);
   vassert(sizeof(void*) == sizeof(int*));
   vassert(sizeof(void*) == sizeof(HWord));
   vassert(sizeof(void*) == sizeof(Addr));
   vassert(sizeof(unsigned long) == sizeof(SizeT));

   vassert(VEX_HOST_WORDSIZE == sizeof(void*));
   vassert(VEX_HOST_WORDSIZE == sizeof(HWord));

   /* These take a lot of space, so make sure we don't have
      any unnoticed size regressions. */
   if (VEX_HOST_WORDSIZE == 4) {
      vassert(sizeof(IRExpr) == 16);
      vassert(sizeof(IRStmt) == 20 /* x86 */
              || sizeof(IRStmt) == 24 /* arm */);
   } else {
      vassert(sizeof(IRExpr) == 32);
      vassert(sizeof(IRStmt) == 32);
   }

   /* Ditto */
   vassert(sizeof(HReg) == 4);
   /* If N_RREGUNIVERSE_REGS ever exceeds 64, the bitset fields in
      RRegSet and HRegUsage will need to be changed to something
      better than ULong. */
   vassert(N_RREGUNIVERSE_REGS == 64);

   /* Check that signed integer division on the host rounds towards
      zero.  If not, h_calc_sdiv32_w_arm_semantics() won't work
      correctly. */
   /* 100.0 / 7.0 == 14.2857 */
   vassert(udiv32(100, 7) == 14);
   vassert(sdiv32(100, 7) == 14);
   vassert(sdiv32(-100, 7) == -14); /* and not -15 */
   vassert(sdiv32(100, -7) == -14); /* ditto */
   vassert(sdiv32(-100, -7) == 14); /* not sure what this proves */

   /* Really start up .. */
   vex_debuglevel         = debuglevel;
   vex_control            = *vcon;
   vex_initdone           = True;
   vexSetAllocMode ( VexAllocModeTEMP );
}


/* --------- Make a translation. --------- */

/* KLUDGE: S390 needs to know the hwcaps of the host when generating
   code, but that info is not passed to emit_S390Instr; only mode64 is.
   So, ideally, we want this passed as an argument too.  Until then, we
   use a global variable, set as a side effect of LibVEX_Translate.
   The variable is defined here rather than in host_s390_defs.c to
   avoid having main_main.c drag in S390 object files on
   non-VEXMULTIARCH builds. */
UInt s390_host_hwcaps;


/* Exported to library client. */

IRSB* LibVEX_FrontEnd ( /*MOD*/ VexTranslateArgs* vta,
                        /*OUT*/ VexTranslateResult* res,
                        /*OUT*/ VexRegisterUpdates* pxControl)
{
   IRExpr*      (*specHelper)   ( const HChar*, IRExpr**, IRStmt**, Int );
   Bool (*preciseMemExnsFn) ( Int, Int, VexRegisterUpdates );
   DisOneInstrFn disInstrFn;

   VexGuestLayout* guest_layout;
   IRSB*           irsb;
   Int             i;
   Int             offB_CMSTART, offB_CMLEN, offB_GUEST_IP, szB_GUEST_IP;
   IRType          guest_word_type;
   IRType          host_word_type;

   guest_layout            = NULL;
   specHelper              = NULL;
   disInstrFn              = NULL;
   preciseMemExnsFn        = NULL;
   guest_word_type         = arch_word_size(vta->arch_guest);
   host_word_type          = arch_word_size(vta->arch_host);
   offB_CMSTART            = 0;
   offB_CMLEN              = 0;
   offB_GUEST_IP           = 0;
   szB_GUEST_IP            = 0;

   vassert(vex_initdone);
   vassert(vta->needs_self_check  != NULL);
   vassert(vta->disp_cp_xassisted != NULL);
   /* Both the chainers and the indir are either NULL or non-NULL. */
   if (vta->disp_cp_chain_me_to_slowEP        != NULL) {
      vassert(vta->disp_cp_chain_me_to_fastEP != NULL);
      vassert(vta->disp_cp_xindir             != NULL);
   } else {
      vassert(vta->disp_cp_chain_me_to_fastEP == NULL);
      vassert(vta->disp_cp_xindir             == NULL);
   }

   vexSetAllocModeTEMP_and_clear();
   vexAllocSanityCheck();

   vex_traceflags = vta->traceflags;

   /* KLUDGE: export hwcaps. */
   if (vta->arch_host == VexArchS390X) {
      s390_host_hwcaps = vta->archinfo_host.hwcaps;
   }

   /* First off, check that the guest and host insn sets
      are supported. */

   switch (vta->arch_guest) {

      case VexArchX86:
         preciseMemExnsFn
            = X86FN(guest_x86_state_requires_precise_mem_exns);
         disInstrFn              = X86FN(disInstr_X86);
         specHelper              = X86FN(guest_x86_spechelper);
         guest_layout            = X86FN(&x86guest_layout);
         offB_CMSTART            = offsetof(VexGuestX86State,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestX86State,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestX86State,guest_EIP);
         szB_GUEST_IP            = sizeof( ((VexGuestX86State*)0)->guest_EIP );
         vassert(vta->archinfo_guest.endness == VexEndnessLE);
         vassert(0 == sizeof(VexGuestX86State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_CMSTART) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_CMLEN  ) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchAMD64:
         preciseMemExnsFn
            = AMD64FN(guest_amd64_state_requires_precise_mem_exns);
         disInstrFn              = AMD64FN(disInstr_AMD64);
         specHelper              = AMD64FN(guest_amd64_spechelper);
         guest_layout            = AMD64FN(&amd64guest_layout);
         offB_CMSTART            = offsetof(VexGuestAMD64State,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestAMD64State,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestAMD64State,guest_RIP);
         szB_GUEST_IP            = sizeof( ((VexGuestAMD64State*)0)->guest_RIP );
         vassert(vta->archinfo_guest.endness == VexEndnessLE);
         vassert(0 == sizeof(VexGuestAMD64State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMSTART ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMLEN   ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_NRADDR  ) == 8);
         break;

      case VexArchPPC32:
         preciseMemExnsFn
            = PPC32FN(guest_ppc32_state_requires_precise_mem_exns);
         disInstrFn              = PPC32FN(disInstr_PPC);
         specHelper              = PPC32FN(guest_ppc32_spechelper);
         guest_layout            = PPC32FN(&ppc32Guest_layout);
         offB_CMSTART            = offsetof(VexGuestPPC32State,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestPPC32State,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestPPC32State,guest_CIA);
         szB_GUEST_IP            = sizeof( ((VexGuestPPC32State*)0)->guest_CIA );
         vassert(vta->archinfo_guest.endness == VexEndnessBE);
         vassert(0 == sizeof(VexGuestPPC32State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMSTART ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMLEN   ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_NRADDR  ) == 4);
         break;

      case VexArchPPC64:
         preciseMemExnsFn
            = PPC64FN(guest_ppc64_state_requires_precise_mem_exns);
         disInstrFn              = PPC64FN(disInstr_PPC);
         specHelper              = PPC64FN(guest_ppc64_spechelper);
         guest_layout            = PPC64FN(&ppc64Guest_layout);
         offB_CMSTART            = offsetof(VexGuestPPC64State,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestPPC64State,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestPPC64State,guest_CIA);
         szB_GUEST_IP            = sizeof( ((VexGuestPPC64State*)0)->guest_CIA );
         vassert(vta->archinfo_guest.endness == VexEndnessBE ||
                 vta->archinfo_guest.endness == VexEndnessLE );
         vassert(0 == sizeof(VexGuestPPC64State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMSTART    ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMLEN      ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR     ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR_GPR2) == 8);
         break;

      case VexArchS390X:
         preciseMemExnsFn
            = S390FN(guest_s390x_state_requires_precise_mem_exns);
         disInstrFn              = S390FN(disInstr_S390);
         specHelper              = S390FN(guest_s390x_spechelper);
         guest_layout            = S390FN(&s390xGuest_layout);
         offB_CMSTART            = offsetof(VexGuestS390XState,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestS390XState,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestS390XState,guest_IA);
         szB_GUEST_IP            = sizeof( ((VexGuestS390XState*)0)->guest_IA);
         vassert(vta->archinfo_guest.endness == VexEndnessBE);
         vassert(0 == sizeof(VexGuestS390XState) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMSTART    ) == 8);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMLEN      ) == 8);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_NRADDR     ) == 8);
         break;

      case VexArchARM:
         preciseMemExnsFn
            = ARMFN(guest_arm_state_requires_precise_mem_exns);
         disInstrFn              = ARMFN(disInstr_ARM);
         specHelper              = ARMFN(guest_arm_spechelper);
         guest_layout            = ARMFN(&armGuest_layout);
         offB_CMSTART            = offsetof(VexGuestARMState,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestARMState,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestARMState,guest_R15T);
         szB_GUEST_IP            = sizeof( ((VexGuestARMState*)0)->guest_R15T );
         vassert(vta->archinfo_guest.endness == VexEndnessLE);
         vassert(0 == sizeof(VexGuestARMState) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_CMSTART) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_CMLEN  ) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchARM64:
         preciseMemExnsFn
            = ARM64FN(guest_arm64_state_requires_precise_mem_exns);
         disInstrFn              = ARM64FN(disInstr_ARM64);
         specHelper              = ARM64FN(guest_arm64_spechelper);
         guest_layout            = ARM64FN(&arm64Guest_layout);
         offB_CMSTART            = offsetof(VexGuestARM64State,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestARM64State,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestARM64State,guest_PC);
         szB_GUEST_IP            = sizeof( ((VexGuestARM64State*)0)->guest_PC );
         vassert(vta->archinfo_guest.endness == VexEndnessLE);
         vassert(0 == sizeof(VexGuestARM64State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMSTART) == 8);
         vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMLEN  ) == 8);
         vassert(sizeof( ((VexGuestARM64State*)0)->guest_NRADDR ) == 8);
         break;

      case VexArchMIPS32:
         preciseMemExnsFn
            = MIPS32FN(guest_mips32_state_requires_precise_mem_exns);
         disInstrFn              = MIPS32FN(disInstr_MIPS);
         specHelper              = MIPS32FN(guest_mips32_spechelper);
         guest_layout            = MIPS32FN(&mips32Guest_layout);
         offB_CMSTART            = offsetof(VexGuestMIPS32State,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestMIPS32State,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestMIPS32State,guest_PC);
         szB_GUEST_IP            = sizeof( ((VexGuestMIPS32State*)0)->guest_PC );
         vassert(vta->archinfo_guest.endness == VexEndnessLE
                 || vta->archinfo_guest.endness == VexEndnessBE);
         vassert(0 == sizeof(VexGuestMIPS32State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMSTART) == 4);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMLEN  ) == 4);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchMIPS64:
         preciseMemExnsFn
            = MIPS64FN(guest_mips64_state_requires_precise_mem_exns);
         disInstrFn              = MIPS64FN(disInstr_MIPS);
         specHelper              = MIPS64FN(guest_mips64_spechelper);
         guest_layout            = MIPS64FN(&mips64Guest_layout);
         offB_CMSTART            = offsetof(VexGuestMIPS64State,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestMIPS64State,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestMIPS64State,guest_PC);
         szB_GUEST_IP            = sizeof( ((VexGuestMIPS64State*)0)->guest_PC );
         vassert(vta->archinfo_guest.endness == VexEndnessLE
                 || vta->archinfo_guest.endness == VexEndnessBE);
         vassert(0 == sizeof(VexGuestMIPS64State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMSTART) == 8);
         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMLEN  ) == 8);
         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_NRADDR ) == 8);
         break;

      default:
         vpanic("LibVEX_Translate: unsupported guest insn set");
   }

   // Check that the guest's hardware capabilities are feasible.  The
   // function does not return if the hwcaps are infeasible in some sense.
   // FIXME: how can we know the guest's hardware capabilities?
   check_hwcaps(vta->arch_guest, vta->archinfo_guest.hwcaps);

   res->status         = VexTransOK;
   res->n_sc_extents   = 0;
   res->offs_profInc   = -1;
   res->n_guest_instrs = 0;

#ifndef VEXMULTIARCH
   /* yet more sanity checks ... */
   if (vta->arch_guest == vta->arch_host) {
      /* This doesn't necessarily have to be true, but if it isn't, it
         means we are simulating one flavour of an architecture on a
         different flavour of the same architecture, which is pretty
         strange. */
      vassert(vta->archinfo_guest.hwcaps == vta->archinfo_host.hwcaps);
      /* ditto */
      vassert(vta->archinfo_guest.endness == vta->archinfo_host.endness);
   }
#endif

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_FE)
      vex_printf("\n------------------------"
                   " Front end "
                   "------------------------\n\n");

   *pxControl = vex_control.iropt_register_updates_default;
   vassert(*pxControl >= VexRegUpdSpAtMemAccess
           && *pxControl <= VexRegUpdAllregsAtEachInsn);

   irsb = bb_to_IR ( vta->guest_extents,
                     &res->n_sc_extents,
                     &res->n_guest_instrs,
                     pxControl,
                     vta->callback_opaque,
                     disInstrFn,
                     vta->guest_bytes,
                     vta->guest_bytes_addr,
                     vta->chase_into_ok,
                     vta->archinfo_host.endness,
                     vta->sigill_diag,
                     vta->arch_guest,
                     &vta->archinfo_guest,
                     &vta->abiinfo_both,
                     guest_word_type,
                     vta->needs_self_check,
                     vta->preamble_function,
                     offB_CMSTART,
                     offB_CMLEN,
                     offB_GUEST_IP,
                     szB_GUEST_IP );

   vexAllocSanityCheck();

   if (irsb == NULL) {
      /* Access failure. */
      vexSetAllocModeTEMP_and_clear();
      return NULL;
   }

   vassert(vta->guest_extents->n_used >= 1 && vta->guest_extents->n_used <= 3);
   vassert(vta->guest_extents->base[0] == vta->guest_bytes_addr);
   for (i = 0; i < vta->guest_extents->n_used; i++) {
      vassert(vta->guest_extents->len[i] < 10000); /* sanity */
   }

   /* bb_to_IR() could have caused pxControl to change. */
   vassert(*pxControl >= VexRegUpdSpAtMemAccess
           && *pxControl <= VexRegUpdAllregsAtEachInsn);

   /* If debugging, show the raw guest bytes for this bb. */
   if (0 || (vex_traceflags & VEX_TRACE_FE)) {
      if (vta->guest_extents->n_used > 1) {
         vex_printf("can't show code due to extents > 1\n");
      } else {
         /* HACK */
         const UChar* p = vta->guest_bytes;
         UInt   sum = 0;
         UInt   guest_bytes_read = (UInt)vta->guest_extents->len[0];
         vex_printf("GuestBytes %lx %u ", vta->guest_bytes_addr,
                                          guest_bytes_read );
         for (i = 0; i < guest_bytes_read; i++) {
            UInt b = (UInt)p[i];
            vex_printf(" %02x", b );
            sum = (sum << 1) ^ b;
         }
         vex_printf("  %08x\n\n", sum);
      }
   }

   /* Sanity check the initial IR. */
   sanityCheckIRSB( irsb, "initial IR",
                    False/*can be non-flat*/, guest_word_type );

   vexAllocSanityCheck();

   /* Clean it up, hopefully a lot. */
   irsb = do_iropt_BB ( irsb, specHelper, preciseMemExnsFn, *pxControl,
                              vta->guest_bytes_addr,
                              vta->arch_guest );

   // JRS 2016 Aug 03: Sanity checking is expensive, we already checked
   // the output of the front end, and iropt never screws up the IR by
   // itself, unless it is being hacked on.  So remove this post-iropt
   // check in "production" use.
   // sanityCheckIRSB( irsb, "after initial iropt",
   //                  True/*must be flat*/, guest_word_type );

   if (vex_traceflags & VEX_TRACE_OPT1) {
      vex_printf("\n------------------------"
                   " After pre-instr IR optimisation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   vexAllocSanityCheck();

   /* Get the thing instrumented. */
   if (vta->instrument1)
      irsb = vta->instrument1(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              &vta->archinfo_host,
                              guest_word_type, host_word_type);
   vexAllocSanityCheck();

   if (vta->instrument2)
      irsb = vta->instrument2(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              &vta->archinfo_host,
                              guest_word_type, host_word_type);

   if (vex_traceflags & VEX_TRACE_INST) {
      vex_printf("\n------------------------"
                   " After instrumentation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   // JRS 2016 Aug 03: as above, this never actually fails in practice.
   // And we'll sanity check anyway after the post-instrumentation
   // cleanup pass.  So skip this check in "production" use.
   // if (vta->instrument1 || vta->instrument2)
   //    sanityCheckIRSB( irsb, "after instrumentation",
   //                     True/*must be flat*/, guest_word_type );

   /* Do a post-instrumentation cleanup pass. */
   if (vta->instrument1 || vta->instrument2) {
      do_deadcode_BB( irsb );
      irsb = cprop_BB( irsb );
      do_deadcode_BB( irsb );
      sanityCheckIRSB( irsb, "after post-instrumentation cleanup",
                       True/*must be flat*/, guest_word_type );
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_OPT2) {
      vex_printf("\n------------------------"
                   " After post-instr IR optimisation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   return irsb;
}


/* Back end of the compilation pipeline.  Is not exported. */

static void libvex_BackEnd ( const VexTranslateArgs *vta,
                             /*MOD*/ VexTranslateResult* res,
                             /*MOD*/ IRSB* irsb,
                             VexRegisterUpdates pxControl )
{
   /* This is the bundle of functions we need to do the back-end stuff
      (insn selection, reg-alloc, assembly) whilst being insulated
      from the target instruction set. */
   void         (*getRegUsage)  ( HRegUsage*, const HInstr*, Bool );
   void         (*mapRegs)      ( HRegRemap*, HInstr*, Bool );
   void         (*genSpill)     ( HInstr**, HInstr**, HReg, Int, Bool );
   void         (*genReload)    ( HInstr**, HInstr**, HReg, Int, Bool );
   HInstr*      (*genMove)      ( HReg, HReg, Bool );
   HInstr*      (*directReload) ( HInstr*, HReg, Short );
   void         (*ppInstr)      ( const HInstr*, Bool );
   UInt         (*ppReg)        ( HReg );
   HInstrArray* (*iselSB)       ( const IRSB*, VexArch, const VexArchInfo*,
                                  const VexAbiInfo*, Int, Int, Bool, Bool,
                                  Addr );
   Int          (*emit)         ( /*MB_MOD*/Bool*,
                                  UChar*, Int, const HInstr*, Bool, VexEndness,
                                  const void*, const void*, const void*,
                                  const void* );
   Bool (*preciseMemExnsFn) ( Int, Int, VexRegisterUpdates );

   const RRegUniverse* rRegUniv = NULL;

   Bool            mode64, chainingAllowed;
   Int             i, j, k, out_used;
   Int guest_sizeB;
   Int offB_HOST_EvC_COUNTER;
   Int offB_HOST_EvC_FAILADDR;
   Addr            max_ga;
   UChar           insn_bytes[128];
   HInstrArray*    vcode;
   HInstrArray*    rcode;

   getRegUsage             = NULL;
   mapRegs                 = NULL;
   genSpill                = NULL;
   genReload               = NULL;
   genMove                 = NULL;
   directReload            = NULL;
   ppInstr                 = NULL;
   ppReg                   = NULL;
   iselSB                  = NULL;
   emit                    = NULL;

   mode64                 = False;
   chainingAllowed        = False;
   guest_sizeB            = 0;
   offB_HOST_EvC_COUNTER  = 0;
   offB_HOST_EvC_FAILADDR = 0;
   preciseMemExnsFn       = NULL;

   vassert(vex_initdone);
   vassert(vta->disp_cp_xassisted != NULL);

   vex_traceflags = vta->traceflags;

   /* Both the chainers and the indir are either NULL or non-NULL. */
   if (vta->disp_cp_chain_me_to_slowEP        != NULL) {
      vassert(vta->disp_cp_chain_me_to_fastEP != NULL);
      vassert(vta->disp_cp_xindir             != NULL);
      chainingAllowed = True;
   } else {
      vassert(vta->disp_cp_chain_me_to_fastEP == NULL);
      vassert(vta->disp_cp_xindir             == NULL);
   }

   switch (vta->arch_guest) {

      case VexArchX86:
         preciseMemExnsFn
            = X86FN(guest_x86_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestX86State);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestX86State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestX86State,host_EvC_FAILADDR);
         break;

      case VexArchAMD64:
         preciseMemExnsFn
            = AMD64FN(guest_amd64_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestAMD64State);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestAMD64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestAMD64State,host_EvC_FAILADDR);
         break;

      case VexArchPPC32:
         preciseMemExnsFn
            = PPC32FN(guest_ppc32_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestPPC32State);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC32State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC32State,host_EvC_FAILADDR);
         break;

      case VexArchPPC64:
         preciseMemExnsFn
            = PPC64FN(guest_ppc64_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestPPC64State);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC64State,host_EvC_FAILADDR);
         break;

      case VexArchS390X:
         preciseMemExnsFn
            = S390FN(guest_s390x_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestS390XState);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestS390XState,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestS390XState,host_EvC_FAILADDR);
         break;

      case VexArchARM:
         preciseMemExnsFn
            = ARMFN(guest_arm_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestARMState);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestARMState,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestARMState,host_EvC_FAILADDR);
         break;

      case VexArchARM64:
         preciseMemExnsFn
            = ARM64FN(guest_arm64_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestARM64State);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestARM64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestARM64State,host_EvC_FAILADDR);
         break;

      case VexArchMIPS32:
         preciseMemExnsFn
            = MIPS32FN(guest_mips32_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestMIPS32State);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS32State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS32State,host_EvC_FAILADDR);
         break;

      case VexArchMIPS64:
         preciseMemExnsFn
            = MIPS64FN(guest_mips64_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestMIPS64State);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS64State,host_EvC_FAILADDR);
         break;

      default:
         vpanic("LibVEX_Codegen: unsupported guest insn set");
   }


   switch (vta->arch_host) {

      case VexArchX86:
         mode64       = False;
         rRegUniv     = X86FN(getRRegUniverse_X86());
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) X86FN(getRegUsage_X86Instr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) X86FN(mapRegs_X86Instr);
         genSpill     = CAST_TO_TYPEOF(genSpill) X86FN(genSpill_X86);
         genReload    = CAST_TO_TYPEOF(genReload) X86FN(genReload_X86);
         genMove      = CAST_TO_TYPEOF(genMove) X86FN(genMove_X86);
         directReload = CAST_TO_TYPEOF(directReload) X86FN(directReload_X86);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) X86FN(ppX86Instr);
         ppReg        = CAST_TO_TYPEOF(ppReg) X86FN(ppHRegX86);
         iselSB       = X86FN(iselSB_X86);
         emit         = CAST_TO_TYPEOF(emit) X86FN(emit_X86Instr);
         vassert(vta->archinfo_host.endness == VexEndnessLE);
         break;

      case VexArchAMD64:
         mode64       = True;
         rRegUniv     = AMD64FN(getRRegUniverse_AMD64());
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) AMD64FN(getRegUsage_AMD64Instr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) AMD64FN(mapRegs_AMD64Instr);
         genSpill     = CAST_TO_TYPEOF(genSpill) AMD64FN(genSpill_AMD64);
         genReload    = CAST_TO_TYPEOF(genReload) AMD64FN(genReload_AMD64);
         genMove      = CAST_TO_TYPEOF(genMove) AMD64FN(genMove_AMD64);
         directReload = CAST_TO_TYPEOF(directReload) AMD64FN(directReload_AMD64);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) AMD64FN(ppAMD64Instr);
         ppReg        = CAST_TO_TYPEOF(ppReg) AMD64FN(ppHRegAMD64);
         iselSB       = AMD64FN(iselSB_AMD64);
         emit         = CAST_TO_TYPEOF(emit) AMD64FN(emit_AMD64Instr);
         vassert(vta->archinfo_host.endness == VexEndnessLE);
         break;

      case VexArchPPC32:
         mode64       = False;
         rRegUniv     = PPC32FN(getRRegUniverse_PPC(mode64));
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) PPC32FN(getRegUsage_PPCInstr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) PPC32FN(mapRegs_PPCInstr);
         genSpill     = CAST_TO_TYPEOF(genSpill) PPC32FN(genSpill_PPC);
         genReload    = CAST_TO_TYPEOF(genReload) PPC32FN(genReload_PPC);
         genMove      = CAST_TO_TYPEOF(genMove) PPC32FN(genMove_PPC);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) PPC32FN(ppPPCInstr);
         ppReg        = CAST_TO_TYPEOF(ppReg) PPC32FN(ppHRegPPC);
         iselSB       = PPC32FN(iselSB_PPC);
         emit         = CAST_TO_TYPEOF(emit) PPC32FN(emit_PPCInstr);
         vassert(vta->archinfo_host.endness == VexEndnessBE);
         break;

      case VexArchPPC64:
         mode64       = True;
         rRegUniv     = PPC64FN(getRRegUniverse_PPC(mode64));
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) PPC64FN(getRegUsage_PPCInstr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) PPC64FN(mapRegs_PPCInstr);
         genSpill     = CAST_TO_TYPEOF(genSpill) PPC64FN(genSpill_PPC);
         genReload    = CAST_TO_TYPEOF(genReload) PPC64FN(genReload_PPC);
         genMove      = CAST_TO_TYPEOF(genMove) PPC64FN(genMove_PPC);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) PPC64FN(ppPPCInstr);
         ppReg        = CAST_TO_TYPEOF(ppReg) PPC64FN(ppHRegPPC);
         iselSB       = PPC64FN(iselSB_PPC);
         emit         = CAST_TO_TYPEOF(emit) PPC64FN(emit_PPCInstr);
         vassert(vta->archinfo_host.endness == VexEndnessBE ||
                 vta->archinfo_host.endness == VexEndnessLE );
         break;

      case VexArchS390X:
         mode64       = True;
         rRegUniv     = S390FN(getRRegUniverse_S390());
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) S390FN(getRegUsage_S390Instr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) S390FN(mapRegs_S390Instr);
         genSpill     = CAST_TO_TYPEOF(genSpill) S390FN(genSpill_S390);
         genReload    = CAST_TO_TYPEOF(genReload) S390FN(genReload_S390);
         genMove      = CAST_TO_TYPEOF(genMove) S390FN(genMove_S390);
         // fixs390: consider implementing directReload_S390
         ppInstr      = CAST_TO_TYPEOF(ppInstr) S390FN(ppS390Instr);
         ppReg        = CAST_TO_TYPEOF(ppReg) S390FN(ppHRegS390);
         iselSB       = S390FN(iselSB_S390);
         emit         = CAST_TO_TYPEOF(emit) S390FN(emit_S390Instr);
         vassert(vta->archinfo_host.endness == VexEndnessBE);
         break;

      case VexArchARM:
         mode64       = False;
         rRegUniv     = ARMFN(getRRegUniverse_ARM());
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) ARMFN(getRegUsage_ARMInstr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) ARMFN(mapRegs_ARMInstr);
         genSpill     = CAST_TO_TYPEOF(genSpill) ARMFN(genSpill_ARM);
         genReload    = CAST_TO_TYPEOF(genReload) ARMFN(genReload_ARM);
         genMove      = CAST_TO_TYPEOF(genMove) ARMFN(genMove_ARM);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) ARMFN(ppARMInstr);
         ppReg        = CAST_TO_TYPEOF(ppReg) ARMFN(ppHRegARM);
         iselSB       = ARMFN(iselSB_ARM);
         emit         = CAST_TO_TYPEOF(emit) ARMFN(emit_ARMInstr);
         vassert(vta->archinfo_host.endness == VexEndnessLE);
         break;

      case VexArchARM64:
         mode64       = True;
         rRegUniv     = ARM64FN(getRRegUniverse_ARM64());
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) ARM64FN(getRegUsage_ARM64Instr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) ARM64FN(mapRegs_ARM64Instr);
         genSpill     = CAST_TO_TYPEOF(genSpill) ARM64FN(genSpill_ARM64);
         genReload    = CAST_TO_TYPEOF(genReload) ARM64FN(genReload_ARM64);
         genMove      = CAST_TO_TYPEOF(genMove) ARM64FN(genMove_ARM64);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) ARM64FN(ppARM64Instr);
         ppReg        = CAST_TO_TYPEOF(ppReg) ARM64FN(ppHRegARM64);
         iselSB       = ARM64FN(iselSB_ARM64);
         emit         = CAST_TO_TYPEOF(emit) ARM64FN(emit_ARM64Instr);
         vassert(vta->archinfo_host.endness == VexEndnessLE);
         break;

      case VexArchMIPS32:
         mode64       = False;
         rRegUniv     = MIPS32FN(getRRegUniverse_MIPS(mode64));
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) MIPS32FN(getRegUsage_MIPSInstr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) MIPS32FN(mapRegs_MIPSInstr);
         genSpill     = CAST_TO_TYPEOF(genSpill) MIPS32FN(genSpill_MIPS);
         genReload    = CAST_TO_TYPEOF(genReload) MIPS32FN(genReload_MIPS);
         genMove      = CAST_TO_TYPEOF(genMove) MIPS32FN(genMove_MIPS);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) MIPS32FN(ppMIPSInstr);
         ppReg        = CAST_TO_TYPEOF(ppReg) MIPS32FN(ppHRegMIPS);
         iselSB       = MIPS32FN(iselSB_MIPS);
         emit         = CAST_TO_TYPEOF(emit) MIPS32FN(emit_MIPSInstr);
         vassert(vta->archinfo_host.endness == VexEndnessLE
                 || vta->archinfo_host.endness == VexEndnessBE);
         break;

      case VexArchMIPS64:
         mode64       = True;
         rRegUniv     = MIPS64FN(getRRegUniverse_MIPS(mode64));
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) MIPS64FN(getRegUsage_MIPSInstr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) MIPS64FN(mapRegs_MIPSInstr);
         genSpill     = CAST_TO_TYPEOF(genSpill) MIPS64FN(genSpill_MIPS);
         genReload    = CAST_TO_TYPEOF(genReload) MIPS64FN(genReload_MIPS);
         genMove      = CAST_TO_TYPEOF(genMove) MIPS64FN(genMove_MIPS);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) MIPS64FN(ppMIPSInstr);
         ppReg        = CAST_TO_TYPEOF(ppReg) MIPS64FN(ppHRegMIPS);
         iselSB       = MIPS64FN(iselSB_MIPS);
         emit         = CAST_TO_TYPEOF(emit) MIPS64FN(emit_MIPSInstr);
         vassert(vta->archinfo_host.endness == VexEndnessLE
                 || vta->archinfo_host.endness == VexEndnessBE);
         break;

      default:
         vpanic("LibVEX_Translate: unsupported host insn set");
   }
   // Check that the host's hardware capabilities are feasible.  The
   // function does not return if the hwcaps are infeasible in some sense.
   check_hwcaps(vta->arch_host, vta->archinfo_host.hwcaps);


   /* Turn it into virtual-registerised code.  Build trees -- this
      also throws away any dead bindings. */
   max_ga = ado_treebuild_BB( irsb, preciseMemExnsFn, pxControl );

   if (vta->finaltidy) {
      irsb = vta->finaltidy(irsb);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_TREES) {
      vex_printf("\n------------------------"
                   "  After tree-building "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   /* HACK */
   if (0) {
      *(vta->host_bytes_used) = 0;
      res->status = VexTransOK; return;
   }
   /* end HACK */

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n------------------------"
                   " Instruction selection "
                   "------------------------\n");

   /* No guest has its IP field at offset zero.  If this fails it
      means some transformation pass somewhere failed to update/copy
      irsb->offsIP properly. */
   vassert(irsb->offsIP >= 16);

   vcode = iselSB ( irsb, vta->arch_host,
                    &vta->archinfo_host,
                    &vta->abiinfo_both,
                    offB_HOST_EvC_COUNTER,
                    offB_HOST_EvC_FAILADDR,
                    chainingAllowed,
                    vta->addProfInc,
                    max_ga );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n");

   if (vex_traceflags & VEX_TRACE_VCODE) {
      for (i = 0; i < vcode->arr_used; i++) {
         vex_printf("%3d   ", i);
         ppInstr(vcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* Register allocate. */
   RegAllocControl con = {
      .univ = rRegUniv, .getRegUsage = getRegUsage, .mapRegs = mapRegs,
      .genSpill = genSpill, .genReload = genReload, .genMove = genMove,
      .directReload = directReload, .guest_sizeB = guest_sizeB,
      .ppInstr = ppInstr, .ppReg = ppReg, .mode64 = mode64};
   switch (vex_control.regalloc_version) {
   case 2:
      rcode = doRegisterAllocation_v2(vcode, &con);
      break;
   case 3:
      rcode = doRegisterAllocation_v3(vcode, &con);
      break;
   default:
      vassert(0);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_RCODE) {
      vex_printf("\n------------------------"
                   " Register-allocated code "
                   "------------------------\n\n");
      for (i = 0; i < rcode->arr_used; i++) {
         vex_printf("%3d   ", i);
         ppInstr(rcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* HACK */
   if (0) {
      *(vta->host_bytes_used) = 0;
      res->status = VexTransOK; return;
   }
   /* end HACK */

   /* Assemble */
   if (vex_traceflags & VEX_TRACE_ASM) {
      vex_printf("\n------------------------"
                   " Assembly "
                   "------------------------\n\n");
   }

   out_used = 0; /* tracks along the host_bytes array */
   for (i = 0; i < rcode->arr_used; i++) {
      HInstr* hi           = rcode->arr[i];
      Bool    hi_isProfInc = False;
      if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
         ppInstr(hi, mode64);
         vex_printf("\n");
      }
      j = emit( &hi_isProfInc,
                insn_bytes, sizeof insn_bytes, hi,
                mode64, vta->archinfo_host.endness,
                vta->disp_cp_chain_me_to_slowEP,
                vta->disp_cp_chain_me_to_fastEP,
                vta->disp_cp_xindir,
                vta->disp_cp_xassisted );
      if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
         for (k = 0; k < j; k++)
            vex_printf("%02x ", (UInt)insn_bytes[k]);
         vex_printf("\n\n");
      }
      if (UNLIKELY(out_used + j > vta->host_bytes_size)) {
         vexSetAllocModeTEMP_and_clear();
         vex_traceflags = 0;
         res->status = VexTransOutputFull;
         return;
      }
      if (UNLIKELY(hi_isProfInc)) {
         vassert(vta->addProfInc); /* else where did it come from? */
         vassert(res->offs_profInc == -1); /* there can be only one (tm) */
         vassert(out_used >= 0);
         res->offs_profInc = out_used;
      }
      { UChar* dst = &vta->host_bytes[out_used];
        for (k = 0; k < j; k++) {
           dst[k] = insn_bytes[k];
        }
        out_used += j;
      }
   }
   *(vta->host_bytes_used) = out_used;

   vexAllocSanityCheck();

   vexSetAllocModeTEMP_and_clear();

   if (vex_traceflags) {
      /* Print the expansion ratio for this SB. */
      j = 0; /* total guest bytes */
      for (i = 0; i < vta->guest_extents->n_used; i++) {
         j += vta->guest_extents->len[i];
      }
      if (1) vex_printf("VexExpansionRatio %d %d   %d :10\n\n",
                        j, out_used, (10 * out_used) / (j == 0 ? 1 : j));
   }

   vex_traceflags = 0;
   res->status = VexTransOK;
   return;
}


/* Exported to library client. */

VexTranslateResult LibVEX_Translate ( /*MOD*/ VexTranslateArgs* vta )
{
   VexTranslateResult res = { 0 };
   VexRegisterUpdates pxControl = VexRegUpd_INVALID;

   IRSB* irsb = LibVEX_FrontEnd(vta, &res, &pxControl);
   libvex_BackEnd(vta, &res, irsb, pxControl);
   return res;
}

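/* Illustrative sketch (hypothetical client code, not part of this
   file): a minimal translation request fills in a VexTranslateArgs
   describing the guest/host pair and the input/output buffers, then
   calls LibVEX_Translate:

      VexTranslateArgs vta;
      // ... zero and fill vta: arch_guest/arch_host, archinfo_*,
      // guest_bytes/guest_bytes_addr, host_bytes/host_bytes_size,
      // host_bytes_used, guest_extents, needs_self_check,
      // the disp_cp_* dispatcher entry points, traceflags, ...
      VexTranslateResult tres = LibVEX_Translate(&vta);
      if (tres.status != VexTransOK) { /* e.g. VexTransOutputFull */ }

   On success the generated host code occupies the first
   *vta.host_bytes_used bytes of vta.host_bytes. */
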

/* --------- Chain/Unchain XDirects. --------- */

VexInvalRange LibVEX_Chain ( VexArch     arch_host,
                             VexEndness  endness_host,
                             void*       place_to_chain,
                             const void* disp_cp_chain_me_EXPECTED,
                             const void* place_to_jump_to )
{
   switch (arch_host) {
      case VexArchX86:
         X86ST(return chainXDirect_X86(endness_host,
                                       place_to_chain,
                                       disp_cp_chain_me_EXPECTED,
                                       place_to_jump_to));
      case VexArchAMD64:
         AMD64ST(return chainXDirect_AMD64(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to));
      case VexArchARM:
         ARMST(return chainXDirect_ARM(endness_host,
                                       place_to_chain,
                                       disp_cp_chain_me_EXPECTED,
                                       place_to_jump_to));
      case VexArchARM64:
         ARM64ST(return chainXDirect_ARM64(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to));
      case VexArchS390X:
         S390ST(return chainXDirect_S390(endness_host,
                                         place_to_chain,
                                         disp_cp_chain_me_EXPECTED,
                                         place_to_jump_to));
      case VexArchPPC32:
         PPC32ST(return chainXDirect_PPC(endness_host,
                                         place_to_chain,
                                         disp_cp_chain_me_EXPECTED,
                                         place_to_jump_to, False/*!mode64*/));
      case VexArchPPC64:
         PPC64ST(return chainXDirect_PPC(endness_host,
                                         place_to_chain,
                                         disp_cp_chain_me_EXPECTED,
                                         place_to_jump_to, True/*mode64*/));
      case VexArchMIPS32:
         MIPS32ST(return chainXDirect_MIPS(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to, False/*!mode64*/));
      case VexArchMIPS64:
         MIPS64ST(return chainXDirect_MIPS(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to, True/*mode64*/));
      default:
         vassert(0);
   }
}

VexInvalRange LibVEX_UnChain ( VexArch     arch_host,
                               VexEndness  endness_host,
                               void*       place_to_unchain,
                               const void* place_to_jump_to_EXPECTED,
                               const void* disp_cp_chain_me )
{
   switch (arch_host) {
      case VexArchX86:
         X86ST(return unchainXDirect_X86(endness_host,
                                         place_to_unchain,
                                         place_to_jump_to_EXPECTED,
                                         disp_cp_chain_me));
      case VexArchAMD64:
         AMD64ST(return unchainXDirect_AMD64(endness_host,
                                             place_to_unchain,
                                             place_to_jump_to_EXPECTED,
                                             disp_cp_chain_me));
      case VexArchARM:
         ARMST(return unchainXDirect_ARM(endness_host,
                                         place_to_unchain,
                                         place_to_jump_to_EXPECTED,
                                         disp_cp_chain_me));
      case VexArchARM64:
         ARM64ST(return unchainXDirect_ARM64(endness_host,
                                             place_to_unchain,
                                             place_to_jump_to_EXPECTED,
                                             disp_cp_chain_me));
      case VexArchS390X:
         S390ST(return unchainXDirect_S390(endness_host,
                                           place_to_unchain,
                                           place_to_jump_to_EXPECTED,
                                           disp_cp_chain_me));
      case VexArchPPC32:
         PPC32ST(return unchainXDirect_PPC(endness_host,
                                           place_to_unchain,
                                           place_to_jump_to_EXPECTED,
                                           disp_cp_chain_me, False/*!mode64*/));
      case VexArchPPC64:
         PPC64ST(return unchainXDirect_PPC(endness_host,
                                           place_to_unchain,
                                           place_to_jump_to_EXPECTED,
                                           disp_cp_chain_me, True/*mode64*/));
      case VexArchMIPS32:
         MIPS32ST(return unchainXDirect_MIPS(endness_host,
                                             place_to_unchain,
                                             place_to_jump_to_EXPECTED,
                                             disp_cp_chain_me, False/*!mode64*/));
      case VexArchMIPS64:
         MIPS64ST(return unchainXDirect_MIPS(endness_host,
                                             place_to_unchain,
                                             place_to_jump_to_EXPECTED,
1301                                              disp_cp_chain_me, True/*!mode64*/));
1302       default:
1303          vassert(0);
1304    }
1305 }
1306 
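/* A sketch of how a caller that manages a code cache might drive
   LibVEX_Chain and LibVEX_UnChain (illustrative only;
   invalidate_icache is a hypothetical helper; the caller must flush
   the range named by the returned VexInvalRange in whatever way the
   host requires):

      VexInvalRange vir;
      // Once a direct jump's target is known, patch the site to jump
      // straight there instead of calling the chain-me stub:
      vir = LibVEX_Chain(arch_host, endness_host, site,
                         disp_cp_chain_me, target);
      invalidate_icache((void*)vir.start, vir.len);
      // If the target translation is later discarded, restore the
      // original call to the stub:
      vir = LibVEX_UnChain(arch_host, endness_host, site,
                           target, disp_cp_chain_me);
      invalidate_icache((void*)vir.start, vir.len);
*/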
Int LibVEX_evCheckSzB ( VexArch arch_host )
{
   static Int cached = 0; /* DO NOT MAKE NON-STATIC */
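   /* Caching here assumes every call passes the same arch_host, which
      holds because a given process generates code for only one host
      architecture; once set, the cached value is returned without
      re-examining the argument. */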
   if (UNLIKELY(cached == 0)) {
      switch (arch_host) {
         case VexArchX86:
            X86ST(cached = evCheckSzB_X86()); break;
         case VexArchAMD64:
            AMD64ST(cached = evCheckSzB_AMD64()); break;
         case VexArchARM:
            ARMST(cached = evCheckSzB_ARM()); break;
         case VexArchARM64:
            ARM64ST(cached = evCheckSzB_ARM64()); break;
         case VexArchS390X:
            S390ST(cached = evCheckSzB_S390()); break;
         case VexArchPPC32:
            PPC32ST(cached = evCheckSzB_PPC()); break;
         case VexArchPPC64:
            PPC64ST(cached = evCheckSzB_PPC()); break;
         case VexArchMIPS32:
            MIPS32ST(cached = evCheckSzB_MIPS()); break;
         case VexArchMIPS64:
            MIPS64ST(cached = evCheckSzB_MIPS()); break;
         default:
            vassert(0);
      }
   }
   return cached;
}

VexInvalRange LibVEX_PatchProfInc ( VexArch    arch_host,
                                    VexEndness endness_host,
                                    void*      place_to_patch,
                                    const ULong* location_of_counter )
{
   switch (arch_host) {
      case VexArchX86:
         X86ST(return patchProfInc_X86(endness_host, place_to_patch,
                                       location_of_counter));
      case VexArchAMD64:
         AMD64ST(return patchProfInc_AMD64(endness_host, place_to_patch,
                                           location_of_counter));
      case VexArchARM:
         ARMST(return patchProfInc_ARM(endness_host, place_to_patch,
                                       location_of_counter));
      case VexArchARM64:
         ARM64ST(return patchProfInc_ARM64(endness_host, place_to_patch,
                                           location_of_counter));
      case VexArchS390X:
         S390ST(return patchProfInc_S390(endness_host, place_to_patch,
                                         location_of_counter));
      case VexArchPPC32:
         PPC32ST(return patchProfInc_PPC(endness_host, place_to_patch,
                                         location_of_counter, False/*!mode64*/));
      case VexArchPPC64:
         PPC64ST(return patchProfInc_PPC(endness_host, place_to_patch,
                                         location_of_counter, True/*mode64*/));
      case VexArchMIPS32:
         MIPS32ST(return patchProfInc_MIPS(endness_host, place_to_patch,
                                           location_of_counter, False/*!mode64*/));
      case VexArchMIPS64:
         MIPS64ST(return patchProfInc_MIPS(endness_host, place_to_patch,
                                           location_of_counter, True/*mode64*/));
      default:
         vassert(0);
   }
}


/* --------- Emulation warnings. --------- */

const HChar* LibVEX_EmNote_string ( VexEmNote ew )
{
   switch (ew) {
     case EmNote_NONE:
        return "none";
     case EmWarn_X86_x87exns:
        return "Unmasking x87 FP exceptions";
     case EmWarn_X86_x87precision:
        return "Selection of non-80-bit x87 FP precision";
     case EmWarn_X86_sseExns:
        return "Unmasking SSE FP exceptions";
     case EmWarn_X86_fz:
        return "Setting %mxcsr.fz (SSE flush-underflows-to-zero mode)";
     case EmWarn_X86_daz:
        return "Setting %mxcsr.daz (SSE treat-denormals-as-zero mode)";
     case EmWarn_X86_acFlag:
        return "Setting %eflags.ac (setting noted but ignored)";
     case EmWarn_PPCexns:
        return "Unmasking PPC32/64 FP exceptions";
     case EmWarn_PPC64_redir_overflow:
        return "PPC64 function redirection stack overflow";
     case EmWarn_PPC64_redir_underflow:
        return "PPC64 function redirection stack underflow";
     case EmWarn_S390X_fpext_rounding:
        return "The specified rounding mode cannot be supported. That\n"
               "  feature requires the floating point extension facility\n"
               "  which is not available on this host. Continuing using\n"
               "  the rounding mode from FPC. Results may differ!";
     case EmWarn_S390X_invalid_rounding:
        return "The specified rounding mode is invalid.\n"
               "  Continuing using 'round to nearest'. Results may differ!";
     case EmFail_S390X_stfle:
        return "Instruction stfle is not supported on this host";
     case EmFail_S390X_stckf:
        return "Instruction stckf is not supported on this host";
     case EmFail_S390X_ecag:
        return "Instruction ecag is not supported on this host";
     case EmFail_S390X_pfpo:
        return "Instruction pfpo is not supported on this host";
     case EmFail_S390X_DFP_insn:
        return "DFP instructions are not supported on this host";
     case EmFail_S390X_fpext:
        return "Encountered an instruction that requires the floating "
               "point extension facility.\n"
               "  That facility is not available on this host";
     case EmFail_S390X_invalid_PFPO_rounding_mode:
        return "The rounding mode in GPR 0 for the PFPO instruction"
               " is invalid";
     case EmFail_S390X_invalid_PFPO_function:
        return "The function code in GPR 0 for the PFPO instruction"
               " is invalid";
     case EmFail_S390X_vx:
        return "Encountered an instruction that requires the vector facility.\n"
               "  That facility is not available on this host";
     default:
        vpanic("LibVEX_EmNote_string: unknown warning");
   }
}

/* ------------------ Arch/HwCaps stuff. ------------------ */

const HChar* LibVEX_ppVexArch ( VexArch arch )
{
   switch (arch) {
      case VexArch_INVALID: return "INVALID";
      case VexArchX86:      return "X86";
      case VexArchAMD64:    return "AMD64";
      case VexArchARM:      return "ARM";
      case VexArchARM64:    return "ARM64";
      case VexArchPPC32:    return "PPC32";
      case VexArchPPC64:    return "PPC64";
      case VexArchS390X:    return "S390X";
      case VexArchMIPS32:   return "MIPS32";
      case VexArchMIPS64:   return "MIPS64";
      default:              return "VexArch???";
   }
}

const HChar* LibVEX_ppVexEndness ( VexEndness endness )
{
   switch (endness) {
      case VexEndness_INVALID: return "INVALID";
      case VexEndnessLE:       return "LittleEndian";
      case VexEndnessBE:       return "BigEndian";
      default:                 return "VexEndness???";
   }
}

/* Return a string with the hardware capabilities, to the extent that
   they pertain to the translation process.  No attempt is made to
   detect *all* capabilities an architecture may have. */
const HChar* LibVEX_ppVexHwCaps ( VexArch arch, UInt hwcaps )
{
   return show_hwcaps(arch, hwcaps);
}


/* Write default settings into *vai. */
void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
{
   vex_bzero(vai, sizeof(*vai));
   vai->hwcaps                  = 0;
   vai->endness                 = VexEndness_INVALID;
   vai->ppc_icache_line_szB     = 0;
   vai->ppc_dcbz_szB            = 0;
   vai->ppc_dcbzl_szB           = 0;
   vai->arm64_dMinLine_lg2_szB  = 0;
   vai->arm64_iMinLine_lg2_szB  = 0;
   vai->arm64_requires_fallback_LLSC = False;
   vai->hwcache_info.num_levels = 0;
   vai->hwcache_info.num_caches = 0;
   vai->hwcache_info.caches     = NULL;
   vai->hwcache_info.icaches_maintain_coherence = True;  // whatever
}

/* Write default settings into *vbi. */
void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi )
{
   vex_bzero(vbi, sizeof(*vbi));
   vbi->guest_stack_redzone_size       = 0;
   vbi->guest_amd64_assume_fs_is_const = False;
   vbi->guest_amd64_assume_gs_is_const = False;
   vbi->guest_ppc_zap_RZ_at_blr        = False;
   vbi->guest_ppc_zap_RZ_at_bl         = NULL;
   vbi->guest__use_fallback_LLSC       = False;
   vbi->host_ppc_calls_use_fndescrs    = False;
}

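/* A sketch of typical caller-side initialisation (the hwcaps and
   redzone values below are illustrative, not prescriptive; real
   values depend on the host and guest being targeted):

      VexArchInfo vai;
      LibVEX_default_VexArchInfo(&vai);
      vai.endness = VexEndnessLE;
      vai.hwcaps  = VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_CX16;

      VexAbiInfo vbi;
      LibVEX_default_VexAbiInfo(&vbi);
      vbi.guest_stack_redzone_size = 128;  // e.g. the AMD64 SysV ABI
*/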

static IRType arch_word_size (VexArch arch) {
   switch (arch) {
      case VexArchX86:
      case VexArchARM:
      case VexArchMIPS32:
      case VexArchPPC32:
         return Ity_I32;

      case VexArchAMD64:
      case VexArchARM64:
      case VexArchMIPS64:
      case VexArchPPC64:
      case VexArchS390X:
         return Ity_I64;

      default:
         vex_printf("Fatal: unknown arch in arch_word_size\n");
         vassert(0);
   }
}


/* Convenience macro to be used in show_hwcaps_ARCH functions */
#define NUM_HWCAPS (sizeof hwcaps_list / sizeof hwcaps_list[0])
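/* For example, with the 5-entry hwcaps_list in show_hwcaps_x86 below,
   NUM_HWCAPS evaluates to 5 at compile time.  The idiom only works on
   a true array that is in scope at the point of use, not on a
   pointer. */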

/* Return a string showing the hwcaps in a nice way.  The string will
   be NULL for unrecognised hardware capabilities. */

static const HChar* show_hwcaps_x86 ( UInt hwcaps )
{
   static const HChar prefix[] = "x86";
   static const struct {
      UInt  hwcaps_bit;
      HChar name[7];
   } hwcaps_list[] = {
      { VEX_HWCAPS_X86_MMXEXT, "mmxext" },
      { VEX_HWCAPS_X86_SSE1,   "sse1"   },
      { VEX_HWCAPS_X86_SSE2,   "sse2"   },
      { VEX_HWCAPS_X86_SSE3,   "sse3"   },
      { VEX_HWCAPS_X86_LZCNT,  "lzcnt"  },
   };
   /* Allocate a large enough buffer */
   static HChar buf[sizeof prefix +
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p = buf + vex_sprintf(buf, "%s", prefix);

   if (hwcaps == 0) {
      vex_sprintf(p, "-%s", "sse0");
   } else {
      UInt i;
      for (i = 0 ; i < NUM_HWCAPS; ++i) {
         if (hwcaps & hwcaps_list[i].hwcaps_bit)
            p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
      }
   }
   return buf;
}
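
/* Example outputs, derived from the table above: hwcaps == 0 yields
   "x86-sse0", while VEX_HWCAPS_X86_MMXEXT | VEX_HWCAPS_X86_SSE1
   yields "x86-mmxext-sse1".  The other show_hwcaps_* printers below
   follow the same prefix-plus-suffixes scheme. */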

static const HChar* show_hwcaps_amd64 ( UInt hwcaps )
{
   static const HChar prefix[] = "amd64";
   static const struct {
      UInt  hwcaps_bit;
      HChar name[7];
   } hwcaps_list[] = {
      { VEX_HWCAPS_AMD64_CX16,   "cx16"   },
      { VEX_HWCAPS_AMD64_LZCNT,  "lzcnt"  },
      { VEX_HWCAPS_AMD64_RDTSCP, "rdtscp" },
      { VEX_HWCAPS_AMD64_SSE3,   "sse3"   },
      { VEX_HWCAPS_AMD64_SSSE3,  "ssse3"  },
      { VEX_HWCAPS_AMD64_AVX,    "avx"    },
      { VEX_HWCAPS_AMD64_AVX2,   "avx2"   },
      { VEX_HWCAPS_AMD64_BMI,    "bmi"    },
   };
   /* Allocate a large enough buffer */
   static HChar buf[sizeof prefix +
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p = buf + vex_sprintf(buf, "%s", prefix);

   if (hwcaps == 0) {
      vex_sprintf(p, "-%s", "sse2");
   } else {
      UInt i;
      for (i = 0 ; i < NUM_HWCAPS; ++i) {
         if (hwcaps & hwcaps_list[i].hwcaps_bit)
            p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
      }
   }
   return buf;
}

static const HChar* show_hwcaps_ppc32 ( UInt hwcaps )
{
   static const HChar prefix[] = "ppc32-int";
   static const struct {
      UInt  hwcaps_bit;
      HChar name[8];
   } hwcaps_list[] = {
      { VEX_HWCAPS_PPC32_F,       "flt"     },
      { VEX_HWCAPS_PPC32_V,       "vmx"     },
      { VEX_HWCAPS_PPC32_FX,      "FX"      },
      { VEX_HWCAPS_PPC32_GX,      "GX"      },
      { VEX_HWCAPS_PPC32_VX,      "VX"      },
      { VEX_HWCAPS_PPC32_DFP,     "DFP"     },
      { VEX_HWCAPS_PPC32_ISA2_07, "ISA2_07" },
      { VEX_HWCAPS_PPC32_ISA3_0,  "ISA3_0"  },
   };
   /* Allocate a large enough buffer */
   static HChar buf[sizeof prefix +
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p = buf + vex_sprintf(buf, "%s", prefix);

   if (hwcaps == 0) return buf;

   UInt i;
   for (i = 0 ; i < NUM_HWCAPS; ++i) {
      if (hwcaps & hwcaps_list[i].hwcaps_bit)
         p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
   }
   return buf;
}

static const HChar* show_hwcaps_ppc64 ( UInt hwcaps )
{
   static const HChar prefix[] = "ppc64-int-flt";
   static const struct {
      UInt  hwcaps_bit;
      HChar name[8];
   } hwcaps_list[] = {
      { VEX_HWCAPS_PPC64_FX,      "FX"      },
      { VEX_HWCAPS_PPC64_GX,      "GX"      },
      { VEX_HWCAPS_PPC64_V,       "vmx"     },
      { VEX_HWCAPS_PPC64_DFP,     "DFP"     },
      { VEX_HWCAPS_PPC64_ISA2_07, "ISA2_07" },
      { VEX_HWCAPS_PPC64_ISA3_0,  "ISA3_0"  },
   };
   /* Allocate a large enough buffer */
   static HChar buf[sizeof prefix +
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p = buf + vex_sprintf(buf, "%s", prefix);

   if (hwcaps == 0) return buf;

   UInt i;
   for (i = 0 ; i < NUM_HWCAPS; ++i) {
      if (hwcaps & hwcaps_list[i].hwcaps_bit)
         p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
   }
   return buf;
}

static const HChar* show_hwcaps_arm ( UInt hwcaps )
{
   static const HChar prefix[] = "ARM";
   static const struct {
      UInt  hwcaps_bit;
      HChar name[6];
   } hwcaps_list[] = {
      { VEX_HWCAPS_ARM_NEON, "neon" },
      { VEX_HWCAPS_ARM_VFP | VEX_HWCAPS_ARM_VFP2 | VEX_HWCAPS_ARM_VFP3, "vfp" },
   };
   /* Allocate a large enough buffer */
   static HChar buf[sizeof prefix + 12 +    // level
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p;
   UInt i, level;

   level = VEX_ARM_ARCHLEVEL(hwcaps);

   p = buf + vex_sprintf(buf, "%sv%u", prefix, level);
   for (i = 0 ; i < NUM_HWCAPS; ++i) {
      if (hwcaps & hwcaps_list[i].hwcaps_bit)
         p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
   }
   return buf;
}

static const HChar* show_hwcaps_arm64 ( UInt hwcaps )
{
   /* Since there are no variants, just insist that hwcaps is zero,
      and declare it invalid otherwise. */
   if (hwcaps == 0)
      return "baseline";
   return "Unsupported";
}

static const HChar* show_hwcaps_s390x ( UInt hwcaps )
{
   static const HChar prefix[] = "s390x";
   static const struct {
      UInt  hwcaps_bit;
      HChar name[6];
   } hwcaps_list[] = {
      { VEX_HWCAPS_S390X_LDISP, "ldisp" },
      { VEX_HWCAPS_S390X_EIMM,  "eimm"  },
      { VEX_HWCAPS_S390X_GIE,   "gie"   },
      { VEX_HWCAPS_S390X_DFP,   "dfp"   },
      { VEX_HWCAPS_S390X_FGX,   "fgx"   },
      { VEX_HWCAPS_S390X_STFLE, "stfle" },
      { VEX_HWCAPS_S390X_ETF2,  "etf2"  },
      { VEX_HWCAPS_S390X_ETF3,  "etf3"  },
      { VEX_HWCAPS_S390X_STCKF, "stckf" },
      { VEX_HWCAPS_S390X_FPEXT, "fpext" },
      { VEX_HWCAPS_S390X_LSC,   "lsc"   },
      { VEX_HWCAPS_S390X_PFPO,  "pfpo"  },
   };
   /* Allocate a large enough buffer */
   static HChar buf[sizeof prefix +
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'

   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p;
   UInt i;

   hwcaps = VEX_HWCAPS_S390X(hwcaps);

   p = buf + vex_sprintf(buf, "%s", prefix);
   for (i = 0 ; i < NUM_HWCAPS; ++i) {
      if (hwcaps & hwcaps_list[i].hwcaps_bit)
         p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
   }

   /* If there are no facilities, add "zarch" */
   if (hwcaps == 0)
      vex_sprintf(p, "-%s", "zarch");

   return buf;
}

static const HChar* show_hwcaps_mips32 ( UInt hwcaps )
{
   /* MIPS baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_MIPS) {
      /* MIPS baseline with msa. */
      if (VEX_MIPS_PROC_MSA(hwcaps)) {
         return "MIPS-baseline-msa";
      }
      /* MIPS baseline with dspr2. */
      if (VEX_MIPS_PROC_DSP2(hwcaps)) {
         return "MIPS-baseline-dspr2";
      }
      /* MIPS baseline with dsp. */
      if (VEX_MIPS_PROC_DSP(hwcaps)) {
         return "MIPS-baseline-dsp";
      }
      return "MIPS-baseline";
   }

   /* Broadcom baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_BROADCOM) {
      return "Broadcom-baseline";
   }

   /* Netlogic baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_NETLOGIC) {
      return "Netlogic-baseline";
   }

   /* Cavium baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_CAVIUM) {
      return "Cavium-baseline";
   }

   /* Ingenic baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_INGENIC_E1) {
      return "Ingenic-baseline";
   }

   /* Loongson baseline. */
   if ((VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_LEGACY) &&
       (VEX_MIPS_PROC_ID(hwcaps) == VEX_PRID_IMP_LOONGSON_64)) {
      return "Loongson-baseline";
   }

   return "Unsupported baseline";
}

static const HChar* show_hwcaps_mips64 ( UInt hwcaps )
{
   /* Netlogic baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_NETLOGIC) {
      return "Netlogic-baseline";
   }

   /* Cavium baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_CAVIUM) {
      return "Cavium-baseline";
   }

   /* Loongson baseline. */
   if ((VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_LEGACY) &&
       (VEX_MIPS_PROC_ID(hwcaps) == VEX_PRID_IMP_LOONGSON_64)) {
      return "Loongson-baseline";
   }

   /* MIPS64 baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_MIPS) {
      /* MIPS64 baseline with msa. */
      if (VEX_MIPS_PROC_MSA(hwcaps)) {
         return "MIPS64-baseline-msa";
      }
      return "MIPS64-baseline";
   }

   return "Unsupported baseline";
}

#undef NUM_HWCAPS

/* This function must not return NULL. */

static const HChar* show_hwcaps ( VexArch arch, UInt hwcaps )
{
   switch (arch) {
      case VexArchX86:    return show_hwcaps_x86(hwcaps);
      case VexArchAMD64:  return show_hwcaps_amd64(hwcaps);
      case VexArchPPC32:  return show_hwcaps_ppc32(hwcaps);
      case VexArchPPC64:  return show_hwcaps_ppc64(hwcaps);
      case VexArchARM:    return show_hwcaps_arm(hwcaps);
      case VexArchARM64:  return show_hwcaps_arm64(hwcaps);
      case VexArchS390X:  return show_hwcaps_s390x(hwcaps);
      case VexArchMIPS32: return show_hwcaps_mips32(hwcaps);
      case VexArchMIPS64: return show_hwcaps_mips64(hwcaps);
      default: return NULL;
   }
}

/* To be used to complain about hwcaps we cannot handle */
__attribute__((noreturn))
static void invalid_hwcaps ( VexArch arch, UInt hwcaps, const HChar *message )
{
   vfatal("\nVEX: %s"
          "     Found: %s\n", message, show_hwcaps(arch, hwcaps));
}

/* This function does not return if the hwcaps don't pass the test. */
static void check_hwcaps ( VexArch arch, UInt hwcaps )
{
   switch (arch) {
      case VexArchX86: {
         if (hwcaps == 0) return;    // baseline

         /* Monotonic: SSE3 > SSE2 > SSE1 > MMXEXT > baseline. */
         static const UInt extras[] = {
            VEX_HWCAPS_X86_MMXEXT, VEX_HWCAPS_X86_SSE1, VEX_HWCAPS_X86_SSE2,
            VEX_HWCAPS_X86_SSE3
         };
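         /* The loop below accepts exactly the prefixes of that chain:
            MMXEXT, MMXEXT|SSE1, MMXEXT|SSE1|SSE2 and
            MMXEXT|SSE1|SSE2|SSE3, with LZCNT additionally permitted
            once SSE2 is present. */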

         UInt i, caps = 0;
         for (i = 0; i < sizeof extras / sizeof extras[0]; ++i) {
            caps |= extras[i];
            if (caps == hwcaps) return;
            /* For SSE2 or later LZCNT is optional */
            if ((caps & VEX_HWCAPS_X86_SSE2) != 0) {
               if ((caps | VEX_HWCAPS_X86_LZCNT) == hwcaps) return;
            }
         }
         invalid_hwcaps(arch, hwcaps, "Cannot handle capabilities\n");
      }

      case VexArchAMD64: {
         /* SSE3 and CX16 are orthogonal and > baseline, although we really
            don't expect to come across anything which can do SSE3 but can't
            do CX16.  Still, we can handle that case.  LZCNT is similarly
            orthogonal. */

         /* Throw out obviously stupid cases: */
         Bool have_sse3  = (hwcaps & VEX_HWCAPS_AMD64_SSE3)  != 0;
         Bool have_ssse3 = (hwcaps & VEX_HWCAPS_AMD64_SSSE3) != 0;
         Bool have_avx   = (hwcaps & VEX_HWCAPS_AMD64_AVX)   != 0;
         Bool have_bmi   = (hwcaps & VEX_HWCAPS_AMD64_BMI)   != 0;
         Bool have_avx2  = (hwcaps & VEX_HWCAPS_AMD64_AVX2)  != 0;

         /* SSSE3 without SSE3 */
         if (have_ssse3 && !have_sse3)
            invalid_hwcaps(arch, hwcaps,
                           "Support for SSSE3 requires SSE3 capabilities\n");
         /* AVX without SSSE3 */
         if (have_avx && !have_ssse3)
            invalid_hwcaps(arch, hwcaps,
                           "Support for AVX requires SSSE3 capabilities\n");
         /* AVX2 or BMI without AVX */
         if (have_avx2 && !have_avx)
            invalid_hwcaps(arch, hwcaps,
                           "Support for AVX2 requires AVX capabilities\n");
         if (have_bmi && !have_avx)
            invalid_hwcaps(arch, hwcaps,
                           "Support for BMI requires AVX capabilities\n");
         return;
      }

      case VexArchPPC32: {
         /* Monotonic with complications.  Basically V > F > baseline,
            but once you have F then you can have FX or GX too. */
         if (hwcaps == 0) return;   // baseline

         if ((hwcaps & VEX_HWCAPS_PPC32_F) == 0)
            invalid_hwcaps(arch, hwcaps,
                           "Missing floating point capability\n");
         /* V, FX, and GX can appear in any combination */

         /* DFP requires V and FX and GX */
         UInt v_fx_gx = VEX_HWCAPS_PPC32_V | VEX_HWCAPS_PPC32_FX |
                        VEX_HWCAPS_PPC32_GX;
         Bool has_v_fx_gx = (hwcaps & v_fx_gx) == v_fx_gx;

         if ((hwcaps & VEX_HWCAPS_PPC32_DFP) && ! has_v_fx_gx)
            invalid_hwcaps(arch, hwcaps,
                           "DFP requires VMX and FX and GX capabilities\n");

         /* VX requires V and FX and GX */
         if ((hwcaps & VEX_HWCAPS_PPC32_VX) && ! has_v_fx_gx)
            invalid_hwcaps(arch, hwcaps,
                           "VX requires VMX and FX and GX capabilities\n");

         /* ISA2_07 requires everything else */
         if ((hwcaps & VEX_HWCAPS_PPC32_ISA2_07) != 0) {
            if (! has_v_fx_gx)
               invalid_hwcaps(arch, hwcaps,
                          "ISA2_07 requires VMX and FX and GX capabilities\n");
            if (! (hwcaps & VEX_HWCAPS_PPC32_VX))
               invalid_hwcaps(arch, hwcaps,
                              "ISA2_07 requires VX capabilities\n");
            if (! (hwcaps & VEX_HWCAPS_PPC32_DFP))
               invalid_hwcaps(arch, hwcaps,
                              "ISA2_07 requires DFP capabilities\n");
         }

         /* ISA 3.0 not supported on 32-bit machines */
         if ((hwcaps & VEX_HWCAPS_PPC32_ISA3_0) != 0) {
            invalid_hwcaps(arch, hwcaps,
                           "ISA 3.0 not supported in 32-bit mode\n");
         }
         return;
      }

      case VexArchPPC64: {
         /* Monotonic with complications.  Basically V > baseline(==F),
            but once you have F then you can have FX or GX too. */
         if (hwcaps == 0) return;   // baseline

         /* V, FX, and GX can appear in any combination */

         /* DFP requires V and FX and GX */
         UInt v_fx_gx = VEX_HWCAPS_PPC64_V | VEX_HWCAPS_PPC64_FX |
                        VEX_HWCAPS_PPC64_GX;
         Bool has_v_fx_gx = (hwcaps & v_fx_gx) == v_fx_gx;

         if ((hwcaps & VEX_HWCAPS_PPC64_DFP) && ! has_v_fx_gx)
            invalid_hwcaps(arch, hwcaps,
                           "DFP requires VMX and FX and GX capabilities\n");

         /* VX requires V and FX and GX */
         if ((hwcaps & VEX_HWCAPS_PPC64_VX) && ! has_v_fx_gx)
            invalid_hwcaps(arch, hwcaps,
                           "VX requires VMX and FX and GX capabilities\n");

         /* ISA2_07 requires everything else */
         if ((hwcaps & VEX_HWCAPS_PPC64_ISA2_07) != 0) {
            if (! has_v_fx_gx)
               invalid_hwcaps(arch, hwcaps,
                        "ISA2_07 requires VMX and FX and GX capabilities\n");
            if (! (hwcaps & VEX_HWCAPS_PPC64_VX))
               invalid_hwcaps(arch, hwcaps,
                              "ISA2_07 requires VX capabilities\n");
            if (! (hwcaps & VEX_HWCAPS_PPC64_DFP))
               invalid_hwcaps(arch, hwcaps,
                              "ISA2_07 requires DFP capabilities\n");
         }

         /* ISA3_0 requires everything else */
         if ((hwcaps & VEX_HWCAPS_PPC64_ISA3_0) != 0) {
            if ( !((hwcaps
                    & VEX_HWCAPS_PPC64_ISA2_07) == VEX_HWCAPS_PPC64_ISA2_07))
               invalid_hwcaps(arch, hwcaps,
                          "ISA3_0 requires ISA2_07 capabilities\n");
            if ( !has_v_fx_gx)
               invalid_hwcaps(arch, hwcaps,
                        "ISA3_0 requires VMX and FX and GX capabilities\n");
            if ( !(hwcaps & VEX_HWCAPS_PPC64_VX))
               invalid_hwcaps(arch, hwcaps,
                              "ISA3_0 requires VX capabilities\n");
            if ( !(hwcaps & VEX_HWCAPS_PPC64_DFP))
               invalid_hwcaps(arch, hwcaps,
                              "ISA3_0 requires DFP capabilities\n");
         }
         return;
      }

      case VexArchARM: {
         Bool NEON  = ((hwcaps & VEX_HWCAPS_ARM_NEON) != 0);
         Bool VFP3  = ((hwcaps & VEX_HWCAPS_ARM_VFP3) != 0);
         UInt level = VEX_ARM_ARCHLEVEL(hwcaps);
         switch (level) {
            case 5:
               if (NEON)
                  invalid_hwcaps(arch, hwcaps,
                          "NEON instructions are not supported for ARMv5.\n");
               return;
            case 6:
               if (NEON)
                  invalid_hwcaps(arch, hwcaps,
                          "NEON instructions are not supported for ARMv6.\n");
               return;
            case 7:
               return;
            case 8:
               if (!NEON || !VFP3)
                  invalid_hwcaps(arch, hwcaps,
                          "NEON and VFP3 are required for ARMv8.\n");
               return;
            default:
               invalid_hwcaps(arch, hwcaps,
                              "ARM architecture level is not supported.\n");
         }
      }

      case VexArchARM64:
         if (hwcaps != 0)
            invalid_hwcaps(arch, hwcaps,
                           "Unsupported hardware capabilities.\n");
         return;

      case VexArchS390X:
         if (! s390_host_has_ldisp)
            invalid_hwcaps(arch, hwcaps,
                           "Host does not have long displacement facility.\n");
         return;

      case VexArchMIPS32:
         switch (VEX_MIPS_COMP_ID(hwcaps)) {
            case VEX_PRID_COMP_MIPS:
            case VEX_PRID_COMP_CAVIUM:
            case VEX_PRID_COMP_INGENIC_E1:
            case VEX_PRID_COMP_BROADCOM:
            case VEX_PRID_COMP_NETLOGIC:
               return;
            default:
               invalid_hwcaps(arch, hwcaps, "Unsupported baseline\n");
         }

      case VexArchMIPS64:
         switch (VEX_MIPS_COMP_ID(hwcaps)) {
            case VEX_PRID_COMP_MIPS:
            case VEX_PRID_COMP_CAVIUM:
            case VEX_PRID_COMP_NETLOGIC:
               return;
            default:
               invalid_hwcaps(arch, hwcaps, "Unsupported baseline\n");
         }

      default:
         vpanic("unknown architecture");
   }
}


/*---------------------------------------------------------------*/
/*--- end                                         main_main.c ---*/
/*---------------------------------------------------------------*/