import logging
from typing import Optional

import pyvex

from ...engine import SimEngineBase
from ....utils.constants import DEFAULT_STATEMENT

l = logging.getLogger(name=__name__)

#pylint:disable=arguments-differ,unused-argument,no-self-use

class VEXMixin(SimEngineBase):
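    """
    A mixin that dispatches the statements and expressions of a VEX IRSB to
    per-tag handler methods, looked up through tables indexed by ``tag_int``.

    The handlers follow a three-layer naming convention:

    - ``_handle_vex_*`` unpacks the pyvex object and forwards its fields;
    - ``_analyze_vex_*`` is a hook wrapping recursion into sub-expressions;
    - ``_perform_vex_*`` does the actual work; subclasses override these to
      implement a concrete data domain.

    A minimal sketch of how a subclass hooks in (hypothetical, for illustration
    only):

        class ConstFoldingMixin(VEXMixin):
            def _perform_vex_expr_Op(self, op, args):
                if op == 'Iop_Add32' and all(type(a) is int for a in args):
                    return (args[0] + args[1]) & 0xffffffff
                return super()._perform_vex_expr_Op(op, args)
    """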
    def __init__(self, project, **kwargs):
        super().__init__(project, **kwargs)
        self._vex_expr_handlers = []
        self._vex_stmt_handlers = []
        self.__init_handlers()

        self.irsb = None
        self.stmt_idx = None
        self.tmps = None

    # mutable per-block state, re-initialized for each block we process
    __tls = ('irsb', 'stmt_idx', 'tmps')

    def __init_handlers(self):
        self._vex_expr_handlers = [None]*pyvex.expr.tag_count
        self._vex_stmt_handlers = [None]*pyvex.stmt.tag_count
        for name, cls in vars(pyvex.expr).items():
            if isinstance(cls, type) and issubclass(cls, pyvex.expr.IRExpr) and cls is not pyvex.expr.IRExpr:
                self._vex_expr_handlers[cls.tag_int] = getattr(self, '_handle_vex_expr_' + name)
        for name, cls in vars(pyvex.stmt).items():
            if isinstance(cls, type) and issubclass(cls, pyvex.stmt.IRStmt) and cls is not pyvex.stmt.IRStmt:
                self._vex_stmt_handlers[cls.tag_int] = getattr(self, '_handle_vex_stmt_' + name)
        assert None not in self._vex_expr_handlers
        assert None not in self._vex_stmt_handlers

    def __getstate__(self):
        return super().__getstate__(),   # return a one-tuple so falsey states don't trip special-casing

    def __setstate__(self, s):
        self.__init_handlers()  # the handler tables hold bound methods, which don't pickle; rebuild them
        super().__setstate__(s[0])

    # one size fits all?
    def _ty_to_bytes(self, ty):
        return pyvex.get_type_size(ty) // getattr(getattr(getattr(self, 'state', None), 'arch', None), 'byte_width', 8)
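
    # NOTE: _is_false is called by the guarded load/store handlers below but is not
    # defined anywhere in this file. A minimal conservative default is sketched here
    # under the assumption that concrete subclasses override it with domain-aware
    # logic (e.g. constraint solving); treat it as a placeholder, not the real thing.
    @staticmethod
    def _is_false(v) -> bool:
        # conservative: only a concrete zero is known to be definitely false
        return isinstance(v, int) and v == 0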

    def _handle_vex_stmt(self, stmt: pyvex.stmt.IRStmt):
        handler = self._vex_stmt_handlers[stmt.tag_int]
        handler(stmt)

    def _handle_vex_expr(self, expr: pyvex.expr.IRExpr):
        handler = self._vex_expr_handlers[expr.tag_int]
        result = handler(expr)
        return self._instrument_vex_expr(result)

    def _instrument_vex_expr(self, result):
        return result

    def _handle_vex_const(self, const: pyvex.const.IRConst):
        return const.value

    #
    # Individual expression handlers go here
    #

    # expressions dependent on the state impl

    def _handle_vex_expr_RdTmp(self, expr: pyvex.expr.RdTmp):
        return self._perform_vex_expr_RdTmp(expr.tmp)
    def _perform_vex_expr_RdTmp(self, tmp):
        return self.tmps[tmp]

    def _handle_vex_expr_Get(self, expr: pyvex.expr.Get):
        return self._perform_vex_expr_Get(
            self._handle_vex_const(pyvex.const.U32(expr.offset)),
            expr.ty)
    def _perform_vex_expr_Get(self, offset, ty, **kwargs):
        return NotImplemented

    def _analyze_vex_expr_Load_addr(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _handle_vex_expr_Load(self, expr: pyvex.expr.Load):
        return self._perform_vex_expr_Load(
            self._analyze_vex_expr_Load_addr(expr.addr),
            expr.ty,
            expr.end)
    def _perform_vex_expr_Load(self, addr, ty, endness, **kwargs):
        return NotImplemented

    # expressions dependent on the data domain
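    # A CCall is a call to a pure VEX helper function, e.g. armg_calculate_condition
    # in the ARM guest (it shows up in the LoadG example further down); only the
    # callee's name, return type, and evaluated arguments are forwarded.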

    def _analyze_vex_expr_CCall_arg(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _handle_vex_expr_CCall(self, expr: pyvex.expr.CCall):
        return self._perform_vex_expr_CCall(
            expr.cee.name,
            expr.retty,
            [self._analyze_vex_expr_CCall_arg(arg) for arg in expr.args],
        )
    def _perform_vex_expr_CCall(self, func_name, ty, args, func=None):
        return NotImplemented

    def _handle_vex_expr_ITE(self, expr: pyvex.expr.ITE):
        return self._perform_vex_expr_ITE(
            self._handle_vex_expr(expr.cond),
            self._handle_vex_expr(expr.iftrue),
            self._handle_vex_expr(expr.iffalse))
    def _perform_vex_expr_ITE(self, cond, ifTrue, ifFalse):
        return NotImplemented

    def _handle_vex_expr_Unop(self, expr: pyvex.expr.Unop):
        return self._handle_vex_expr_Op(expr)
    def _handle_vex_expr_Binop(self, expr: pyvex.expr.Binop):
        return self._handle_vex_expr_Op(expr)
    def _handle_vex_expr_Triop(self, expr: pyvex.expr.Triop):
        return self._handle_vex_expr_Op(expr)
    def _handle_vex_expr_Qop(self, expr: pyvex.expr.Qop):
        return self._handle_vex_expr_Op(expr)
    def _handle_vex_expr_Op(self, expr):
        return self._perform_vex_expr_Op(expr.op, [self._handle_vex_expr(arg) for arg in expr.args])
    def _perform_vex_expr_Op(self, op, args):
        return NotImplemented

    # fully implemented expressions

    def _handle_vex_expr_Const(self, expr: pyvex.expr.Const):
        return self._handle_vex_const(expr.con)

    def _analyze_vex_expr_GetI_ix(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _handle_vex_expr_GetI(self, expr: pyvex.expr.GetI):
        return self._perform_vex_expr_GetI(
            expr.descr.base,
            expr.descr.elemTy,
            expr.bias,
            self._analyze_vex_expr_GetI_ix(expr.ix),
            expr.descr.nElems,
        )

    def _perform_vex_expr_GetI_get(self, *a, **kw): return self._perform_vex_expr_Get(*a, **kw)
    def _perform_vex_expr_GetI(self, base, ty, bias, ix, nElems):
        offset = self._perform_vex_stmt_PutI_compute(base, ty, bias, ix, nElems)
        return self._perform_vex_expr_GetI_get(offset, ty)

    # oh boy.

    def _handle_vex_expr_GSPTR(self, expr: pyvex.expr.GSPTR):
        return NotImplemented

    def _handle_vex_expr_VECRET(self, expr: pyvex.expr.VECRET):
        return NotImplemented

    def _handle_vex_expr_Binder(self, expr: pyvex.expr.Binder):
        return NotImplemented

    #
    # Individual statement handlers go here
    #

    # stmt category 1: fluff

    def _handle_vex_stmt_IMark(self, stmt):
        pass

    def _handle_vex_stmt_NoOp(self, stmt):
        pass

    def _handle_vex_stmt_AbiHint(self, stmt):
        pass

    def _handle_vex_stmt_MBE(self, stmt):
        pass

    # stmt category 2: real shit

    def _analyze_vex_stmt_Put_data(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _handle_vex_stmt_Put(self, stmt):
        self._perform_vex_stmt_Put(
            self._handle_vex_const(pyvex.const.U32(stmt.offset)),
            self._analyze_vex_stmt_Put_data(stmt.data))
    def _perform_vex_stmt_Put(self, offset, data, **kwargs):
        pass

    def _analyze_vex_stmt_WrTmp_data(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _handle_vex_stmt_WrTmp(self, stmt):
        self._perform_vex_stmt_WrTmp(
            stmt.tmp,
            self._analyze_vex_stmt_WrTmp_data(stmt.data)
        )
    def _perform_vex_stmt_WrTmp(self, tmp, data):
        self.tmps[tmp] = data

    def _analyze_vex_stmt_Store_address(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _analyze_vex_stmt_Store_data(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _handle_vex_stmt_Store(self, stmt: pyvex.stmt.Store):
        self._perform_vex_stmt_Store(
            self._analyze_vex_stmt_Store_address(stmt.addr),
            self._analyze_vex_stmt_Store_data(stmt.data),
            stmt.end
        )
    def _perform_vex_stmt_Store(self, addr, data, endness, **kwargs):
        pass

    def _analyze_vex_stmt_Exit_guard(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _handle_vex_stmt_Exit(self, stmt: pyvex.stmt.Exit):
        self._perform_vex_stmt_Exit(
            self._analyze_vex_stmt_Exit_guard(stmt.guard),
            self._handle_vex_const(stmt.dst),
            stmt.jk
        )
    def _perform_vex_stmt_Exit(self, guard, target, jumpkind):
        pass

    def _analyze_vex_stmt_Dirty_arg(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _handle_vex_stmt_Dirty(self, stmt: pyvex.stmt.Dirty):
        return self._perform_vex_stmt_Dirty(
            stmt.cee.name,
            self.irsb.tyenv.lookup(stmt.tmp) if stmt.tmp not in (-1, 0xffffffff) else None,
            stmt.tmp,
            [self._analyze_vex_stmt_Dirty_arg(arg) for arg in stmt.args]
        )
    def _perform_vex_stmt_Dirty_wrtmp(self, *a, **kw): return self._perform_vex_stmt_WrTmp(*a, **kw)
    def _perform_vex_stmt_Dirty(self, func_name, ty, tmp, args):
        retval = self._perform_vex_stmt_Dirty_call(func_name, ty, args)
        if tmp not in (-1, 0xffffffff):  # -1 / 0xffffffff is IRTemp_INVALID: the result is discarded
            self._perform_vex_stmt_Dirty_wrtmp(tmp, retval)
    def _perform_vex_stmt_Dirty_call(self, func_name, ty, args, func=None):
        return NotImplemented

    # stmt category 3: weird load/store patterns implemented in terms of above

    def _analyze_vex_stmt_PutI_ix(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _analyze_vex_stmt_PutI_data(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _handle_vex_stmt_PutI(self, stmt: pyvex.stmt.PutI):
        self._perform_vex_stmt_PutI(
            stmt.descr.base,
            stmt.descr.elemTy,
            stmt.bias,
            self._analyze_vex_stmt_PutI_ix(stmt.ix),
            stmt.descr.nElems,
            self._analyze_vex_stmt_PutI_data(stmt.data)
        )

    def _perform_vex_stmt_PutI_compute(self, base, elemTy, bias, ix, nElems):
        # base + ((bias + ix) % nElems) * elemSize
        elemSize = self._ty_to_bytes(elemTy)
        index = self._perform_vex_expr_Op('Iop_Add32', (self._handle_vex_const(pyvex.const.U32(bias)), ix))
        # widen to 64 bits for DivMod, which yields the quotient in the low 32 bits
        # and the remainder (the value we want) in the high 32 bits
        big_index = self._perform_vex_expr_Op('Iop_32HLto64', (self._handle_vex_const(pyvex.const.U32(0)), index))
        divmod_index = self._perform_vex_expr_Op('Iop_DivModU64to32', (big_index, self._handle_vex_const(pyvex.const.U32(nElems))))
        mod_index = self._perform_vex_expr_Op('Iop_64HIto32', (divmod_index,))
        offset = self._perform_vex_expr_Op('Iop_Mul32', (mod_index, self._handle_vex_const(pyvex.const.U32(elemSize))))
        return self._perform_vex_expr_Op('Iop_Add32', (self._handle_vex_const(pyvex.const.U32(base)), offset))
    def _perform_vex_stmt_PutI(self, base, elemTy, bias, ix, nElems, data):
        offset = self._perform_vex_stmt_PutI_compute(base, elemTy, bias, ix, nElems)
        self._perform_vex_stmt_Put(offset, data)
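    # To make the computation above concrete: with a (hypothetical) descriptor of
    # base=136, elemTy='Ity_I64', nElems=8 and bias=0, writing element ix=5 lands
    # at guest register offset 136 + ((0 + 5) % 8) * 8 = 176.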

    def _analyze_vex_stmt_LLSC_addr(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _analyze_vex_stmt_LLSC_storedata(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _handle_vex_stmt_LLSC(self, stmt: pyvex.stmt.LLSC):
        self._perform_vex_stmt_LLSC(
            stmt.result,
            self._analyze_vex_stmt_LLSC_addr(stmt.addr),
            stmt.endness,
            self._analyze_vex_stmt_LLSC_storedata(stmt.storedata) if stmt.storedata is not None else None,
            self.irsb.tyenv.lookup(stmt.result))

    def _perform_vex_stmt_LLSC_load(self, *a, **kw): return self._perform_vex_expr_Load(*a, **kw)
    def _perform_vex_stmt_LLSC_store(self, *a, **kw): return self._perform_vex_stmt_Store(*a, **kw)
    def _perform_vex_stmt_LLSC_wrtmp(self, *a, **kw): return self._perform_vex_stmt_WrTmp(*a, **kw)
    def _perform_vex_stmt_LLSC(self, result, addr, endness, storedata, ty):
        if storedata is None:
            load_result = self._perform_vex_stmt_LLSC_load(addr, ty, endness)
            self._perform_vex_stmt_LLSC_wrtmp(result, load_result)
        else:
            self._perform_vex_stmt_LLSC_store(addr, storedata, endness)
            self._perform_vex_stmt_LLSC_wrtmp(result, self._handle_vex_const(pyvex.const.U1(1)))
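    # Note that the store-conditional arm above always reports success by writing
    # U1(1) to the result temp; LL/SC pairs are modeled as never failing, a
    # simplification that holds as long as nothing can intervene between the pair.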

    def _analyze_vex_stmt_LoadG_addr(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _analyze_vex_stmt_LoadG_alt(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _analyze_vex_stmt_LoadG_guard(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _handle_vex_stmt_LoadG(self, stmt: pyvex.stmt.LoadG):
        self._perform_vex_stmt_LoadG(
            self._analyze_vex_stmt_LoadG_addr(stmt.addr),
            self._analyze_vex_stmt_LoadG_alt(stmt.alt),
            self._analyze_vex_stmt_LoadG_guard(stmt.guard),
            stmt.dst,
            stmt.cvt,
            stmt.end)
    def _perform_vex_stmt_LoadG_load(self, *a, **kw): return self._perform_vex_expr_Load(*a, **kw)
    def _perform_vex_stmt_LoadG_widen(self, *a, **kw): return self._perform_vex_expr_Op(*a, **kw)
    def _perform_vex_stmt_LoadG_ite(self, *a, **kw): return self._perform_vex_expr_ITE(*a, **kw)
    def _perform_vex_stmt_LoadG_wrtmp(self, *a, **kw): return self._perform_vex_stmt_WrTmp(*a, **kw)
    def _perform_vex_stmt_LoadG_guard_condition(self, guard): return guard == 1
    def _perform_vex_stmt_LoadG(self, addr, alt, guard, dst, cvt, end):
        cvt_properties = {
            'ILGop_IdentV128': ('Ity_V128', None),          # 128 bit vector, no conversion
            'ILGop_Ident64':   ('Ity_I64', None),           # 64 bit, no conversion
            'ILGop_Ident32':   ('Ity_I32', None),           # 32 bit, no conversion
            'ILGop_16Uto32':   ('Ity_I16', 'Iop_16Uto32'),  # 16 bit load, Z-widen to 32
            'ILGop_16Sto32':   ('Ity_I16', 'Iop_16Sto32'),  # 16 bit load, S-widen to 32
            'ILGop_8Uto32':    ('Ity_I8', 'Iop_8Uto32'),    # 8 bit load, Z-widen to 32
            'ILGop_8Sto32':    ('Ity_I8', 'Iop_8Sto32')     # 8 bit load, S-widen to 32
        }

        # Because of how VEX's ARM lifter works, we may introduce non-existent register loads.
        # Here is an example:
        #
        # .text:0800408C ITTTT MI
        # .text:0800408E LDRMI   R2, =0x40020004
        # .text:08004090 LDRMI   R3, [R2]
        #
        # 116 | ------ IMark(0x800408e, 2, 1) ------
        # 117 | t247 = Or32(t225,0x00000040)
        # 118 | t254 = armg_calculate_condition(t247,t227,t229,t231):Ity_I32
        # 119 | t262 = GET:I32(r2)
        # 120 | t263 = CmpNE32(t254,0x00000000)
        # 121 | t66 = if (t263) ILGop_Ident32(LDle(0x080040bc)) else t262
        # 122 | PUT(r2) = t66
        # 123 | PUT(pc) = 0x08004091
        # 124 | ------ IMark(0x8004090, 2, 1) ------
        # 125 | t280 = t263
        # 126 | t73 = if (t280) ILGop_Ident32(LDle(t66)) else t222
        #
        # t280 == t263 == the condition inside t66. Now t66 looks like this:
        #   <BV32 cond then 0x40020004 else reg_r2_861_32{UNINITIALIZED}>. Since t280 guards the load from t66,
        # if that load is not aware that t280 is True, we will end up reading from reg_r2_861_32,
        # which is not what the original instruction intended.
        # Therefore, the load from t66 should be aware of the condition that t280 is True. Better yet, don't
        # perform the read at all if the condition evaluates to False.
        # We can perform another optimization: let this condition be cond. When cond can evaluate to either True
        # or False, we skip the read if cond is False (a relatively cheap check). When cond is True, we perform
        # the read with only the intended address (instead of the entire guarded address). This way we get rid of
        # the redundant load that should not have existed in the first place.

        ty, cvt_op = cvt_properties[cvt]
        if self._is_false(guard):
            self._perform_vex_stmt_LoadG_wrtmp(dst, alt)
            return
        load_result = self._perform_vex_stmt_LoadG_load(addr, ty, end,
                                                        condition=self._perform_vex_stmt_LoadG_guard_condition(guard))
        if cvt_op is None:
            cvt_result = load_result
        else:
            cvt_result = self._perform_vex_stmt_LoadG_widen(cvt_op, (load_result,))
        ite_result = self._perform_vex_stmt_LoadG_ite(guard, cvt_result, alt)
        self._perform_vex_stmt_LoadG_wrtmp(dst, ite_result)

    def _analyze_vex_stmt_StoreG_addr(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _analyze_vex_stmt_StoreG_data(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _analyze_vex_stmt_StoreG_guard(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _handle_vex_stmt_StoreG(self, stmt: pyvex.stmt.StoreG):
        self._perform_vex_stmt_StoreG(
            self._analyze_vex_stmt_StoreG_addr(stmt.addr),
            self._analyze_vex_stmt_StoreG_data(stmt.data),
            self._analyze_vex_stmt_StoreG_guard(stmt.guard),
            stmt.data.result_type(self.irsb.tyenv),
            stmt.end
        )
    def _perform_vex_stmt_StoreG_load(self, *a, **kw): return self._perform_vex_expr_Load(*a, **kw)
    def _perform_vex_stmt_StoreG_ite(self, *a, **kw): return self._perform_vex_expr_ITE(*a, **kw)
    def _perform_vex_stmt_StoreG_store(self, *a, **kw): return self._perform_vex_stmt_Store(*a, **kw)
    def _perform_vex_stmt_StoreG_guard_condition(self, guard): return guard == 1
    def _perform_vex_stmt_StoreG(self, addr, data, guard, ty, endness, **kwargs):
        # perform the same optimization as in _perform_vex_stmt_LoadG
        if self._is_false(guard):
            return
        self._perform_vex_stmt_StoreG_store(addr, data, endness,
                                            condition=self._perform_vex_stmt_StoreG_guard_condition(guard),
                                            **kwargs)

    def _analyze_vex_stmt_CAS_addr(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _analyze_vex_stmt_CAS_dataLo(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _analyze_vex_stmt_CAS_dataHi(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _analyze_vex_stmt_CAS_expdLo(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _analyze_vex_stmt_CAS_expdHi(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def _handle_vex_stmt_CAS(self, stmt: pyvex.stmt.CAS):
        self._perform_vex_stmt_CAS(
            self._analyze_vex_stmt_CAS_addr(stmt.addr),
            self._analyze_vex_stmt_CAS_dataLo(stmt.dataLo),
            self._analyze_vex_stmt_CAS_dataHi(stmt.dataHi) if stmt.dataHi is not None else None,
            self._analyze_vex_stmt_CAS_expdLo(stmt.expdLo),
            self._analyze_vex_stmt_CAS_expdHi(stmt.expdHi) if stmt.expdHi is not None else None,
            stmt.oldLo,
            stmt.oldHi,
            stmt.endness,
            stmt.expdLo.result_type(self.irsb.tyenv)
        )
    def _perform_vex_stmt_CAS_load(self, *a, **kw): return self._perform_vex_expr_Load(*a, **kw)
    def _perform_vex_stmt_CAS_wrtmp(self, *a, **kw): return self._perform_vex_stmt_WrTmp(*a, **kw)
    def _perform_vex_stmt_CAS_cmp(self, *a, **kw): return self._perform_vex_expr_Op(*a, **kw)
    def _perform_vex_stmt_CAS_narrow(self, *a, **kw): return self._perform_vex_expr_Op(*a, **kw)
    def _perform_vex_stmt_CAS_widen(self, *a, **kw): return self._perform_vex_expr_Op(*a, **kw)
    def _perform_vex_stmt_CAS_storeg(self, *a, **kw): return self._perform_vex_stmt_StoreG(*a, **kw)
    def _perform_vex_stmt_CAS(self, addr, dataLo, dataHi, expdLo, expdHi, oldLo, oldHi, endness, ty):
        # - load mem
        # - compare the loaded value against the expected value
        # - store mem, conditional on the comparison
        # - write the old value(s) to tmps
        double = dataHi is not None
        if double:
            # double CAS: pack {Hi,Lo} into a single double-width value, operate on
            # that, and split the loaded value back into halves at the end
            ty, narrow_lo_op, narrow_hi_op, widen_op = {
                'Ity_I8':  ('Ity_I16', 'Iop_16to8', 'Iop_16HIto8', 'Iop_8HLto16'),
                'Ity_I16': ('Ity_I32', 'Iop_32to16', 'Iop_32HIto16', 'Iop_16HLto32'),
                'Ity_I32': ('Ity_I64', 'Iop_64to32', 'Iop_64HIto32', 'Iop_32HLto64'),
                'Ity_I64': ('Ity_V128', 'Iop_128to64', 'Iop_128HIto64', 'Iop_64HLto128'),
            }[ty]
            data = self._perform_vex_stmt_CAS_widen(widen_op, (dataHi, dataLo))
            expd = self._perform_vex_stmt_CAS_widen(widen_op, (expdHi, expdLo))
        else:
            narrow_lo_op = narrow_hi_op = None
            data = dataLo
            expd = expdLo

        cmp_op = {
            'Ity_I8':  'Iop_CmpEQ8',
            'Ity_I16': 'Iop_CmpEQ16',
            'Ity_I32': 'Iop_CmpEQ32',
            'Ity_I64': 'Iop_CmpEQ64',
            'Ity_V128': 'Iop_CmpEQ128',
        }[ty]

        val = self._perform_vex_stmt_CAS_load(addr, ty, endness)
        cmp = self._perform_vex_stmt_CAS_cmp(cmp_op, (val, expd))
        self._perform_vex_stmt_CAS_storeg(addr, data, cmp, ty, endness)

        if double:
            valHi = self._perform_vex_stmt_CAS_narrow(narrow_hi_op, (val,))
            valLo = self._perform_vex_stmt_CAS_narrow(narrow_lo_op, (val,))

            self._perform_vex_stmt_CAS_wrtmp(oldLo, valLo)
            self._perform_vex_stmt_CAS_wrtmp(oldHi, valHi)
        else:
            self._perform_vex_stmt_CAS_wrtmp(oldLo, val)

    #
    # block level handling
    #

    def _analyze_vex_defaultexit(self, *a, **kw): return self._handle_vex_expr(*a, **kw)
    def handle_vex_block(self, irsb: pyvex.IRSB):
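        """
        Process an IRSB: reset the per-block state, feed every statement through
        its handler in order, then handle the default exit.
        """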
        self.irsb = irsb
        self.tmps = [None]*self.irsb.tyenv.types_used

        for stmt_idx, stmt in enumerate(irsb.statements):
            self.stmt_idx = stmt_idx
            self._handle_vex_stmt(stmt)
        self.stmt_idx = DEFAULT_STATEMENT
        self._handle_vex_defaultexit(irsb.next, irsb.jumpkind)

    def _handle_vex_defaultexit(self, expr: Optional[pyvex.expr.IRExpr], jumpkind: str):
        self._perform_vex_defaultexit(
            self._analyze_vex_defaultexit(expr) if expr is not None else None,
            jumpkind
        )

    def _perform_vex_defaultexit(self, expr, jumpkind):
        pass