from typing import Dict, Optional

import logging

import claripy
from archinfo.arch_arm import is_arm_arch

# FIX: SimError / SimCCallError are raised in this module (op_concretize below,
# and the pc_actions_*MULQ / pc_calculate_* helpers further down) but were never
# imported, which would turn any of those raises into a NameError.
from angr.errors import SimCCallError, SimError
from angr.state_plugins.sim_action_object import _raw_ast, SimActionObject

l = logging.getLogger(name=__name__)
#l.setLevel(logging.DEBUG)

# pylint: disable=R0911
# pylint: disable=W0613
# pylint: disable=W0612
# pylint: disable=invalid-unary-operand-type

###############
### Helpers ###
###############

# There might be a better way of doing this
def calc_paritybit(p, msb=7, lsb=0):
    """Return the x86-style parity flag of bits [msb:lsb] of *p* as a 1-bit BV.

    Starts from 1 and XORs in every bit of the slice, so the result is 1 when
    the selected byte contains an even number of set bits (PF semantics).
    """
    if len(p) > msb:
        p_part = p[msb:lsb]
    else:
        # p is already no wider than the requested slice; use it as-is.
        p_part = p

    b = claripy.BVV(1, 1)
    for i in range(p_part.size()):
        b = b ^ p_part[i]
    return b

def _cond_flag(condition):
    """Convert a boolean AST into a 1-bit flag: 1 if *condition* holds, else 0."""
    return claripy.If(condition, claripy.BVV(1, 1), claripy.BVV(0, 1))

def calc_zerobit(p):
    """Return the ZF-style flag for *p*: 1 iff *p* equals zero."""
    return _cond_flag(p == 0)

def boolean_extend(O, a, b, size):
    """Apply the binary boolean operator *O* to (a, b) and widen the result to *size* bits."""
    return claripy.If(O(a, b), claripy.BVV(1, size), claripy.BVV(0, size))

def op_concretize(op):
    """Concretize *op* (e.g. a cc_op thunk value) into a plain Python int.

    :raises CCallMultivaluedException: when *op* is an If-tree whose leaves are
        all concrete BVVs; the (condition, value) case list is attached.
    :raises SimError: when *op* is genuinely symbolic and cannot be resolved.
    """
    if type(op) is int:
        return op
    # Pull nested ITEs to the top so an If-of-concretes is recognizable.
    op = op.ite_excavated
    if op.op == 'If':
        cases = list(claripy.reverse_ite_cases(op))
        if all(c.op == 'BVV' for _, c in cases):
            raise CCallMultivaluedException(cases)
    if op.op != 'BVV':
        # FIX: this message was one string literal broken across a line by
        # extraction; rejoined into a single line.
        raise SimError("Hit a symbolic conditional operation. Something has gone wildly wrong.")
    return op.args[0]

def strip_simaction(val):
    """Unwrap a SimActionObject into its underlying AST; other values pass through."""
    if type(val) == SimActionObject:
        return _raw_ast(val)
    return val

class CCallMultivaluedException(Exception):
    """Raised by op_concretize when a value is an If-tree over concrete cases."""
    pass

##################
### x86* data ###
##################

# Per-architecture tables describing the VEX-style condition-code thunk:
# condition numbers, flag bit positions/masks, and cc_op numbers.
data = {
    'AMD64': {
        'CondTypes': { },
        'CondBitOffsets': { },
        'CondBitMasks': { },
        'OpTypes': { },
    }, 'X86': {
        'CondTypes': { },
        'CondBitOffsets': { },
        'CondBitMasks': { },
        'OpTypes': { },
    }
} # type: Dict[str, Dict[str, Dict[str, Optional[int]]]]

# condition types
data['AMD64']['CondTypes']['CondO']      = 0  # /* overflow */
data['AMD64']['CondTypes']['CondNO']     = 1  # /* no overflow */
data['AMD64']['CondTypes']['CondB']      = 2  # /* below */
data['AMD64']['CondTypes']['CondNB']     = 3  # /* not below */
data['AMD64']['CondTypes']['CondZ']      = 4  # /* zero */
data['AMD64']['CondTypes']['CondNZ']     = 5  # /* not zero */
data['AMD64']['CondTypes']['CondBE']     = 6  # /* below or equal */
data['AMD64']['CondTypes']['CondNBE']    = 7  # /* not below or equal */
data['AMD64']['CondTypes']['CondS']      = 8  # /* negative */
data['AMD64']['CondTypes']['CondNS']     = 9  # /* not negative */
data['AMD64']['CondTypes']['CondP']      = 10 # /* parity even */
data['AMD64']['CondTypes']['CondNP']     = 11 # /* not parity even */
data['AMD64']['CondTypes']['CondL']      = 12 # /* jump less */
data['AMD64']['CondTypes']['CondNL']     = 13 # /* not less */
data['AMD64']['CondTypes']['CondLE']     = 14 # /* less or equal */
data['AMD64']['CondTypes']['CondNLE']    = 15 # /* not less or equal */

# condition bit offsets (positions of each flag within the rflags-style word)
data['AMD64']['CondBitOffsets']['G_CC_SHIFT_O'] = 11
data['AMD64']['CondBitOffsets']['G_CC_SHIFT_S'] = 7
data['AMD64']['CondBitOffsets']['G_CC_SHIFT_Z'] = 6
data['AMD64']['CondBitOffsets']['G_CC_SHIFT_A'] = 4
data['AMD64']['CondBitOffsets']['G_CC_SHIFT_C'] = 0
data['AMD64']['CondBitOffsets']['G_CC_SHIFT_P'] = 2

# masks
# Derive each AMD64 flag mask from its bit offset so the two tables can never
# disagree.
for _f in ('O', 'S', 'Z', 'A', 'C', 'P'):
    data['AMD64']['CondBitMasks']['G_CC_MASK_' + _f] = (
        1 << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_' + _f]
    )

# operation types: each op's number is simply its index in this tuple.
# NOTE: the ordering here is significant — it fixes both the numeric values and
# the dict insertion order that data_inverted (built later) depends on.
_AMD64_OP_NAMES = (
    'G_CC_OP_COPY',
    'G_CC_OP_ADDB', 'G_CC_OP_ADDW', 'G_CC_OP_ADDL', 'G_CC_OP_ADDQ',
    'G_CC_OP_SUBB', 'G_CC_OP_SUBW', 'G_CC_OP_SUBL', 'G_CC_OP_SUBQ',
    'G_CC_OP_ADCB', 'G_CC_OP_ADCW', 'G_CC_OP_ADCL', 'G_CC_OP_ADCQ',
    'G_CC_OP_SBBB', 'G_CC_OP_SBBW', 'G_CC_OP_SBBL', 'G_CC_OP_SBBQ',
    'G_CC_OP_LOGICB', 'G_CC_OP_LOGICW', 'G_CC_OP_LOGICL', 'G_CC_OP_LOGICQ',
    'G_CC_OP_INCB', 'G_CC_OP_INCW', 'G_CC_OP_INCL', 'G_CC_OP_INCQ',
    'G_CC_OP_DECB', 'G_CC_OP_DECW', 'G_CC_OP_DECL', 'G_CC_OP_DECQ',
    'G_CC_OP_SHLB', 'G_CC_OP_SHLW', 'G_CC_OP_SHLL', 'G_CC_OP_SHLQ',
    'G_CC_OP_SHRB', 'G_CC_OP_SHRW', 'G_CC_OP_SHRL', 'G_CC_OP_SHRQ',
    'G_CC_OP_ROLB', 'G_CC_OP_ROLW', 'G_CC_OP_ROLL', 'G_CC_OP_ROLQ',
    'G_CC_OP_RORB', 'G_CC_OP_RORW', 'G_CC_OP_RORL', 'G_CC_OP_RORQ',
    'G_CC_OP_UMULB', 'G_CC_OP_UMULW', 'G_CC_OP_UMULL', 'G_CC_OP_UMULQ',
    'G_CC_OP_SMULB', 'G_CC_OP_SMULW', 'G_CC_OP_SMULL', 'G_CC_OP_SMULQ',
    'G_CC_OP_ANDN32', 'G_CC_OP_ANDN64',
    'G_CC_OP_BLSI32', 'G_CC_OP_BLSI64',
    'G_CC_OP_BLSMSK32', 'G_CC_OP_BLSMSK64',
    'G_CC_OP_BLSR32', 'G_CC_OP_BLSR64',
    'G_CC_OP_ADCXL', 'G_CC_OP_ADCXQ',
    'G_CC_OP_ADOXL', 'G_CC_OP_ADOXQ',
    'G_CC_OP_NUMBER',
)
for _i, _op in enumerate(_AMD64_OP_NAMES):
    data['AMD64']['OpTypes'][_op] = _i

# X86 condition numbers (same encoding as AMD64, plus CondAlways == 16).
_X86_COND_NAMES = (
    'CondO', 'CondNO', 'CondB', 'CondNB',
    'CondZ', 'CondNZ', 'CondBE', 'CondNBE',
    'CondS', 'CondNS', 'CondP', 'CondNP',
    'CondL', 'CondNL', 'CondLE', 'CondNLE',
    'CondAlways',
)
for _i, _cond in enumerate(_X86_COND_NAMES):
    data['X86']['CondTypes'][_cond] = _i

# X86 flag bit positions within the eflags-style word.
data['X86']['CondBitOffsets'].update({
    'G_CC_SHIFT_O': 11,
    'G_CC_SHIFT_S': 7,
    'G_CC_SHIFT_Z': 6,
    'G_CC_SHIFT_A': 4,
    'G_CC_SHIFT_C': 0,
    'G_CC_SHIFT_P': 2,
})

# masks, derived from the offsets exactly as for AMD64
for _f in ('O', 'S', 'Z', 'A', 'C', 'P'):
    data['X86']['CondBitMasks']['G_CC_MASK_' + _f] = (
        1 << data['X86']['CondBitOffsets']['G_CC_SHIFT_' + _f]
    )

# X86 operation types — again, value == index, order is significant.
_X86_OP_NAMES = (
    'G_CC_OP_COPY',
    'G_CC_OP_ADDB', 'G_CC_OP_ADDW', 'G_CC_OP_ADDL',
    'G_CC_OP_SUBB', 'G_CC_OP_SUBW', 'G_CC_OP_SUBL',
    'G_CC_OP_ADCB', 'G_CC_OP_ADCW', 'G_CC_OP_ADCL',
    'G_CC_OP_SBBB', 'G_CC_OP_SBBW', 'G_CC_OP_SBBL',
    'G_CC_OP_LOGICB', 'G_CC_OP_LOGICW', 'G_CC_OP_LOGICL',
    'G_CC_OP_INCB', 'G_CC_OP_INCW', 'G_CC_OP_INCL',
    'G_CC_OP_DECB', 'G_CC_OP_DECW', 'G_CC_OP_DECL',
    'G_CC_OP_SHLB', 'G_CC_OP_SHLW', 'G_CC_OP_SHLL',
    'G_CC_OP_SHRB', 'G_CC_OP_SHRW', 'G_CC_OP_SHRL',
    'G_CC_OP_ROLB', 'G_CC_OP_ROLW', 'G_CC_OP_ROLL',
    'G_CC_OP_RORB', 'G_CC_OP_RORW', 'G_CC_OP_RORL',
    'G_CC_OP_UMULB', 'G_CC_OP_UMULW', 'G_CC_OP_UMULL',
    'G_CC_OP_SMULB', 'G_CC_OP_SMULW', 'G_CC_OP_SMULL',
    'G_CC_OP_NUMBER',
)
for _i, _op in enumerate(_X86_OP_NAMES):
    data['X86']['OpTypes'][_op] = _i

# 64-bit op variants that do not exist on 32-bit x86 (first batch; the rest
# follow immediately after this section).
for _op in ('G_CC_OP_SMULQ', 'G_CC_OP_UMULQ', 'G_CC_OP_RORQ', 'G_CC_OP_ROLQ',
            'G_CC_OP_SHRQ', 'G_CC_OP_SHLQ', 'G_CC_OP_DECQ', 'G_CC_OP_INCQ'):
    data['X86']['OpTypes'][_op] = None
data['X86']['OpTypes']['G_CC_OP_LOGICQ'] = None
data['X86']['OpTypes']['G_CC_OP_SBBQ'] = None
data['X86']['OpTypes']['G_CC_OP_ADCQ'] = None
data['X86']['OpTypes']['G_CC_OP_SUBQ'] = None
data['X86']['OpTypes']['G_CC_OP_ADDQ'] = None

# Reverse lookup tables: value -> name (e.g. op number -> 'G_CC_OP_ADDL').
# NOTE: the many None values in X86 OpTypes collapse into a single None key;
# only the last-inserted name survives for that key.
data_inverted = { k_arch: { k_data_class: {y:x for (x,y) in d_data_class.items()} for k_data_class, d_data_class in d_arch.items() } for k_arch,d_arch in data.items() }

# Native width, in bits, of each platform's condition-code thunk values.
data['AMD64']['size'] = 64
data['X86']['size'] = 32

#
# AMD64 internal helpers
#
# The pc_actions_* helpers below each model one cc_op family: given the
# dependency values stored in the flag thunk, they return the six flags
# (cf, pf, af, zf, sf, of) as 1-bit ASTs via pc_make_rdata.
#

def pc_preamble(nbits):
    # Return (all-ones data mask of width nbits, integer sign-bit mask).
    data_mask = claripy.BVV(2 ** nbits - 1, nbits)
    sign_mask = 1 << (nbits - 1)
    return data_mask, sign_mask

def pc_make_rdata(nbits, cf, pf, af, zf, sf, of, platform=None):
    # Keep the flags as a tuple of individual 1-bit ASTs; callers that need a
    # packed flags word go through pc_make_rdata_if_necessary instead.
    return cf, pf, af, zf, sf, of

def pc_make_rdata_if_necessary(nbits, cf, pf, af, zf, sf, of, platform=None):
    # Pack the six flag bits into an nbits-wide flags word, placing each bit at
    # its platform-defined offset. Sorting (descending by offset) puts the
    # high-order flag first for concatenation.
    vec = [(data[platform]['CondBitOffsets']['G_CC_SHIFT_C'], cf),
           (data[platform]['CondBitOffsets']['G_CC_SHIFT_P'], pf),
           (data[platform]['CondBitOffsets']['G_CC_SHIFT_A'], af),
           (data[platform]['CondBitOffsets']['G_CC_SHIFT_Z'], zf),
           (data[platform]['CondBitOffsets']['G_CC_SHIFT_S'], sf),
           (data[platform]['CondBitOffsets']['G_CC_SHIFT_O'], of)]
    vec.sort(reverse=True)
    # _concat_flags is defined elsewhere in this module — TODO confirm it pads
    # the gaps between flag offsets with zero bits.
    return _concat_flags(nbits, vec)

def pc_actions_ADD(state, nbits, arg_l, arg_r, cc_ndep, platform=None):
    # Flags for arg_l + arg_r (result recomputed here from the thunk deps).
    data_mask, sign_mask = pc_preamble(nbits)
    res = arg_l + arg_r

    # Unsigned overflow: the sum wrapped below either addend.
    cf = claripy.If(claripy.ULT(res, arg_l), claripy.BVV(1, 1), claripy.BVV(0, 1))
    pf = calc_paritybit(res)
    # Adjust flag: carry out of bit 3 (bit position G_CC_SHIFT_A of the XOR).
    af = (res ^ arg_l ^ arg_r)[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
    zf = calc_zerobit(res)
    sf = res[nbits - 1:nbits - 1]
    # Signed overflow: operands had the same sign and the result's sign differs.
    of = ((arg_l ^ arg_r ^ data_mask) & (arg_l ^ res))[nbits - 1:nbits - 1]

    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_SUB(state, nbits, arg_l, arg_r, cc_ndep, platform=None):
    # Flags for arg_l - arg_r.
    res = arg_l - arg_r

    # Borrow occurred iff arg_l < arg_r (unsigned).
    cf = claripy.If(claripy.ULT(arg_l, arg_r), claripy.BVV(1, 1), claripy.BVV(0, 1))
    pf = calc_paritybit(res)
    af = (res ^ arg_l ^ arg_r)[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
    zf = calc_zerobit(res)
    sf = res[nbits - 1:nbits - 1]
    # Signed overflow: operands differed in sign and result's sign flipped from arg_l.
    of = ((arg_l ^ arg_r) & (arg_l ^ res))[nbits - 1:nbits - 1]

    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_LOGIC(state, nbits, arg_l, arg_r, cc_ndep, platform=None):
    # Flags after a bitwise op; arg_l already holds the result. CF/AF/OF are
    # defined to be cleared.
    cf = claripy.BVV(0, 1)
    pf = calc_paritybit(arg_l)
    af = claripy.BVV(0, 1)
    zf = calc_zerobit(arg_l)
    sf = arg_l[nbits-1]
    of = claripy.BVV(0, 1)

    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_DEC(state, nbits, res, _, cc_ndep, platform=None):
    # Flags for a decrement; res is the post-decrement value, so the original
    # operand is res + 1. DEC leaves CF untouched: it is recovered from cc_ndep.
    arg_l = res + 1
    arg_r = 1

    cf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_C'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_C']]
    pf = calc_paritybit(res)
    af = (res ^ arg_l ^ 1)[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
    zf = calc_zerobit(res)
    sf = res[nbits-1]
    # Overflow iff the sign changed across the decrement.
    of = claripy.If(sf == arg_l[nbits-1], claripy.BVV(0, 1), claripy.BVV(1, 1))
    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_ADC(state, nbits, cc_dep1, cc_dep2, cc_ndep, platform=None):
    # Add-with-carry. Per the thunk convention, cc_dep2 was XORed with the old
    # carry when stored, so it is un-XORed here before use.
    old_c = cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_C']
    arg_l = cc_dep1
    arg_r = cc_dep2 ^ old_c
    res = (arg_l + arg_r) + old_c

    # With carry-in, res == arg_l also indicates wraparound, hence <= vs <.
    cf = claripy.If(
        old_c != 0,
        claripy.If(res <= arg_l, claripy.BVV(1, 1), claripy.BVV(0, 1)),
        claripy.If(res < arg_l, claripy.BVV(1, 1), claripy.BVV(0, 1))
    )
    pf = calc_paritybit(res)
    af = (res ^ arg_l ^ arg_r)[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
    zf = calc_zerobit(res)
    sf = res[nbits - 1]
    of = ((arg_l ^ arg_r ^ -1) & (arg_l ^ res))[nbits-1]

    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_ADCX(state, nbits, cc_dep1, cc_dep2, cc_ndep, is_adc, platform=None):
    # ADCX/ADOX: an add-with-carry that updates only CF (ADCX, is_adc=True) or
    # only OF (ADOX, is_adc=False); all other flags pass through from cc_ndep.
    pf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_P'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_P']]
    af = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_A'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
    zf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_Z'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_Z']]
    sf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_S'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_S']]
    if is_adc:
        # ADCX consumes CF as carry-in and preserves OF.
        carry = claripy.LShR(cc_ndep, data[platform]['CondBitOffsets']['G_CC_SHIFT_C']) & 1
        of = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_O'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_O']]
    else:
        # ADOX consumes OF as carry-in and preserves CF.
        carry = claripy.LShR(cc_ndep, data[platform]['CondBitOffsets']['G_CC_SHIFT_O']) & 1
        cf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_C'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_C']]
    # Same dep-encoding trick as pc_actions_ADC: cc_dep2 was XORed with carry.
    arg_l = cc_dep1
    arg_r = cc_dep2 ^ carry
    res = (arg_l + arg_r) + carry

    carry = claripy.If(
        carry != 0,
        claripy.If(res <= arg_l, claripy.BVV(1, 1), claripy.BVV(0, 1)),
        claripy.If(res < arg_l, claripy.BVV(1, 1), claripy.BVV(0, 1))
    )
    if is_adc:
        cf = carry
    else:
        of = carry

    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_ANDN(state, nbits, cc_dep1, cc_dep2, cc_ndep, platform=None):
    # BMI1 ANDN: cc_dep1 holds the result; CF/OF cleared, ZF/SF from the result.
    cf = claripy.BVV(0, 1)
    pf = claripy.BVV(0, 1)
    af = claripy.BVV(0, 1)
    of = claripy.BVV(0, 1)
    zf = _cond_flag(cc_dep1 == 0)
    sf = cc_dep1[nbits - 1]
    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_BLSI(state, nbits, cc_dep1, cc_dep2, cc_ndep, platform=None):
    # BMI1 BLSI: cc_dep1 is the result, cc_dep2 the source; CF set iff source != 0.
    pf = claripy.BVV(0, 1)
    af = claripy.BVV(0, 1)
    of = claripy.BVV(0, 1)
    cf = _cond_flag(cc_dep2 != 0)
    zf = _cond_flag(cc_dep1 == 0)
    sf = cc_dep1[nbits - 1]
    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_BLSMSK(state, nbits, cc_dep1, cc_dep2, cc_ndep, platform=None):
    # BMI1 BLSMSK: CF set iff the source (cc_dep2) was zero; ZF defined as 0.
    pf = claripy.BVV(0, 1)
    af = claripy.BVV(0, 1)
    of = claripy.BVV(0, 1)
    zf = claripy.BVV(0, 1)
    cf = _cond_flag(cc_dep2 == 0)
    sf = cc_dep1[nbits - 1]
    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_BLSR(state, nbits, cc_dep1, cc_dep2, cc_ndep, platform=None):
    # BMI1 BLSR: CF set iff the source (cc_dep2) was zero; ZF from the result.
    pf = claripy.BVV(0, 1)
    af = claripy.BVV(0, 1)
    of = claripy.BVV(0, 1)
    cf = _cond_flag(cc_dep2 == 0)
    zf = _cond_flag(cc_dep1 == 0)
    sf = cc_dep1[nbits - 1]
    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_SBB(state, nbits, cc_dep1, cc_dep2, cc_ndep, platform=None):
    # Subtract-with-borrow; mirrors pc_actions_ADC's dep encoding (cc_dep2 was
    # XORed with the old carry bit).
    old_c = cc_ndep[data[platform]['CondBitOffsets']['G_CC_SHIFT_C']].zero_extend(nbits-1)
    arg_l = cc_dep1
    arg_r = cc_dep2 ^ old_c
    res = (arg_l - arg_r) - old_c

    # With borrow-in, equality also borrows (ULE); without, strict ULT.
    cf_c = claripy.If(claripy.ULE(arg_l, arg_r), claripy.BVV(1, 1), claripy.BVV(0, 1))
    cf_noc = claripy.If(claripy.ULT(arg_l, arg_r), claripy.BVV(1, 1), claripy.BVV(0, 1))
    cf = claripy.If(old_c == 1, cf_c, cf_noc)
    pf = calc_paritybit(res)
    af = (res ^ arg_l ^ arg_r)[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
    zf = calc_zerobit(res)
    sf = res[nbits-1]
    of = ((arg_l ^ arg_r) & (arg_l ^ res))[nbits-1]
    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_INC(state, nbits, res, _, cc_ndep, platform=None):
    # Flags for an increment; res is the post-increment value. Like DEC, INC
    # preserves CF, which is recovered from cc_ndep.
    arg_l = res - 1
    arg_r = 1

    cf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_C'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_C']]
    pf = calc_paritybit(res)
    af = (res ^ arg_l ^ 1)[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
    zf = calc_zerobit(res)
    sf = res[nbits-1]
    of = claripy.If(sf == arg_l[nbits-1], claripy.BVV(0, 1), claripy.BVV(1, 1))
    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_SHL(state, nbits, remaining, shifted, cc_ndep, platform=None):
    # Shift left: `remaining` is the result, `shifted` the value shifted by one
    # less (so its top/bottom bits expose the last bit shifted out).
    cf = ((remaining >> (nbits - 1)) & data[platform]['CondBitMasks']['G_CC_MASK_C'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_C']]
    pf = calc_paritybit(remaining[7:0])
    af = claripy.BVV(0, 1)
    zf = calc_zerobit(remaining)
    sf = remaining[nbits-1]
    of = (remaining[0] ^ shifted[0])[0]
    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_SHR(state, nbits, remaining, shifted, cc_ndep, platform=None):
    # Shift right: CF is the last bit shifted out (low bit of `shifted`).
    cf = claripy.If(shifted & 1 != 0, claripy.BVV(1, 1), claripy.BVV(0, 1))
    pf = calc_paritybit(remaining[7:0])
    af = claripy.BVV(0, 1)
    zf = calc_zerobit(remaining)
    sf = remaining[nbits-1]
    of = (remaining[0] ^ shifted[0])[0]
    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_ROL(state, nbits, res, _, cc_ndep, platform=None):
    # Rotate left updates only CF and OF; PF/AF/ZF/SF pass through from cc_ndep.
    cf = res[0]
    pf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_P'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_P']]
    af = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_A'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
    zf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_Z'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_Z']]
    sf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_S'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_S']]
    # OF = MSB xor LSB of the rotated result.
    of = (claripy.LShR(res, nbits-1) ^ res)[0]
    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_ROR(state, nbits, res, _, cc_ndep, platform=None):
    # Rotate right updates only CF and OF; the rest pass through from cc_ndep.
    cf = res[nbits-1]
    pf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_P'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_P']]
    af = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_A'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
    zf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_Z'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_Z']]
    sf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_S'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_S']]
    # OF = XOR of the top two bits of the result.
    of = (res[nbits-1] ^ res[nbits-2])
    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_UMUL(state, nbits, cc_dep1, cc_dep2, cc_ndep, platform=None):
    # Unsigned multiply. NOTE(review): `lo` is truncated to nbits before the
    # shift, so `hi` as written is always zero — looks suspicious but is kept
    # as-is; confirm against the reference implementation.
    lo = (cc_dep1 * cc_dep2)[nbits - 1:0]
    rr = lo
    hi = (rr >> nbits)[nbits - 1:0]
    # CF = OF = 1 iff the upper half of the product is nonzero.
    cf = claripy.If(hi != 0, claripy.BVV(1, 1), claripy.BVV(0, 1))
    zf = calc_zerobit(lo)
    pf = calc_paritybit(lo)
    af = claripy.BVV(0, 1)
    sf = lo[nbits - 1]
    of = cf
    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_UMULQ(*args, **kwargs):
    # Deliberately unimplemented; see NOTE in pc_calculate_rdata_all_WRK about
    # this path being unreachable there.
    l.error("Unsupported flag action UMULQ")
    raise SimCCallError("Unsupported flag action. Please implement or bug Yan.")

def pc_actions_SMUL(state, nbits, cc_dep1, cc_dep2, cc_ndep, platform=None):
    # Signed multiply. Same truncation caveat as pc_actions_UMUL applies to `hi`.
    lo = (cc_dep1 * cc_dep2)[nbits - 1:0]
    rr = lo
    hi = (rr >> nbits)[nbits - 1:0]
    # CF = OF = 1 iff the upper half is not the sign extension of the lower half.
    cf = claripy.If(hi != (lo >> (nbits - 1)), claripy.BVV(1, 1), claripy.BVV(0, 1))
    zf = calc_zerobit(lo)
    pf = calc_paritybit(lo)
    af = claripy.BVV(0, 1)
    sf = lo[nbits - 1]
    of = cf
    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)

def pc_actions_SMULQ(*args, **kwargs):
    # Deliberately unimplemented; see NOTE in pc_calculate_rdata_all_WRK about
    # this path being unreachable there.
    l.error("Unsupported flag action SMULQ")
    raise SimCCallError("Unsupported flag action. Please implement or bug Yan.")


def pc_calculate_rdata_all_WRK(state, cc_op, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=None):
    """Dispatch a concrete cc_op to its pc_actions_* helper.

    Returns either a (cf, pf, af, zf, sf, of) tuple of 1-bit ASTs, or — for the
    COPY op — the already-packed flags word.

    :raises SimCCallError: for an op number with no handler.
    """
    # sanity check
    cc_op = op_concretize(cc_op)

    if cc_op == data[platform]['OpTypes']['G_CC_OP_COPY']:
        # COPY: cc_dep1 already holds the flags word; mask to the six flag bits.
        l.debug("cc_op == data[platform]['OpTypes']['G_CC_OP_COPY']")
        return cc_dep1_formal & (data[platform]['CondBitMasks']['G_CC_MASK_O'] | data[platform]['CondBitMasks']['G_CC_MASK_S'] | data[platform]['CondBitMasks']['G_CC_MASK_Z']
                                 | data[platform]['CondBitMasks']['G_CC_MASK_A'] | data[platform]['CondBitMasks']['G_CC_MASK_C'] | data[platform]['CondBitMasks']['G_CC_MASK_P'])

    # Map the op number back to its 'G_CC_OP_*' name for string-based dispatch.
    cc_str = data_inverted[platform]['OpTypes'][cc_op]

    # _get_nbits is defined elsewhere in this module; it derives the operand
    # width from the op-name suffix (B/W/L/Q or 32/64) — TODO confirm.
    nbits = _get_nbits(cc_str)
    l.debug("nbits == %d", nbits)

    # Truncate all thunk dependencies to the operation's width.
    cc_dep1_formal = cc_dep1_formal[nbits-1:0]
    cc_dep2_formal = cc_dep2_formal[nbits-1:0]
    cc_ndep_formal = cc_ndep_formal[nbits-1:0]

    if cc_str in [ 'G_CC_OP_ADDB', 'G_CC_OP_ADDW', 'G_CC_OP_ADDL', 'G_CC_OP_ADDQ' ]:
        l.debug("cc_str: ADD")
        return pc_actions_ADD(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)

    if cc_str in [ 'G_CC_OP_ADCB', 'G_CC_OP_ADCW', 'G_CC_OP_ADCL', 'G_CC_OP_ADCQ' ]:
        l.debug("cc_str: ADC")
        return pc_actions_ADC(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)

    if cc_str in [ 'G_CC_OP_SUBB', 'G_CC_OP_SUBW', 'G_CC_OP_SUBL', 'G_CC_OP_SUBQ' ]:
        l.debug("cc_str: SUB")
        return pc_actions_SUB(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)

    if cc_str in [ 'G_CC_OP_SBBB', 'G_CC_OP_SBBW', 'G_CC_OP_SBBL', 'G_CC_OP_SBBQ' ]:
        l.debug("cc_str: SBB")
        return pc_actions_SBB(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)

    if cc_str in [ 'G_CC_OP_LOGICB', 'G_CC_OP_LOGICW', 'G_CC_OP_LOGICL', 'G_CC_OP_LOGICQ' ]:
        l.debug("cc_str: LOGIC")
        return pc_actions_LOGIC(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)

    if cc_str in [ 'G_CC_OP_INCB', 'G_CC_OP_INCW', 'G_CC_OP_INCL', 'G_CC_OP_INCQ' ]:
        l.debug("cc_str: INC")
        return pc_actions_INC(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)

    if cc_str in [ 'G_CC_OP_DECB', 'G_CC_OP_DECW', 'G_CC_OP_DECL', 'G_CC_OP_DECQ' ]:
        l.debug("cc_str: DEC")
        return pc_actions_DEC(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)

    if cc_str in [ 'G_CC_OP_SHLB', 'G_CC_OP_SHLW', 'G_CC_OP_SHLL', 'G_CC_OP_SHLQ' ]:
        l.debug("cc_str: SHL")
        return pc_actions_SHL(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)

    if cc_str in [ 'G_CC_OP_SHRB', 'G_CC_OP_SHRW', 'G_CC_OP_SHRL', 'G_CC_OP_SHRQ' ]:
        l.debug("cc_str: SHR")
        return pc_actions_SHR(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)

    if cc_str in [ 'G_CC_OP_ROLB', 'G_CC_OP_ROLW', 'G_CC_OP_ROLL', 'G_CC_OP_ROLQ' ]:
        l.debug("cc_str: ROL")
        return pc_actions_ROL(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)

    if cc_str in [ 'G_CC_OP_RORB', 'G_CC_OP_RORW', 'G_CC_OP_RORL', 'G_CC_OP_RORQ' ]:
        l.debug("cc_str: ROR")
        return pc_actions_ROR(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)

    if cc_str in [ 'G_CC_OP_UMULB', 'G_CC_OP_UMULW', 'G_CC_OP_UMULL', 'G_CC_OP_UMULQ' ]:
        l.debug("cc_str: UMUL")
        return pc_actions_UMUL(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
    # NOTE(review): unreachable — the membership test above already includes
    # 'G_CC_OP_UMULQ', so pc_actions_UMULQ is never invoked from here.
    if cc_str == 'G_CC_OP_UMULQ':
        l.debug("cc_str: UMULQ")
        return pc_actions_UMULQ(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
    if cc_str in [ 'G_CC_OP_SMULB', 'G_CC_OP_SMULW', 'G_CC_OP_SMULL', 'G_CC_OP_SMULQ' ]:
        l.debug("cc_str: SMUL")
        return pc_actions_SMUL(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
    # NOTE(review): unreachable for the same reason as the UMULQ branch above.
    if cc_str == 'G_CC_OP_SMULQ':
        l.debug("cc_str: SMULQ")
        return pc_actions_SMULQ(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
    if cc_str in [ 'G_CC_OP_ANDN32', 'G_CC_OP_ANDN64']:
        l.debug('cc_str: ANDN')
        return pc_actions_ANDN(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
    if cc_str in [ 'G_CC_OP_BLSI32', 'G_CC_OP_BLSI64']:
        l.debug('cc_str: BLSI')
        return pc_actions_BLSI(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
    if cc_str in [ 'G_CC_OP_BLSMSK32', 'G_CC_OP_BLSMSK64']:
        l.debug('cc_str: BLSMSK')
        return pc_actions_BLSMSK(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
    if cc_str in [ 'G_CC_OP_BLSR32', 'G_CC_OP_BLSR64']:
        l.debug('cc_str: BLSR')
        return pc_actions_BLSR(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
    if cc_str in [ 'G_CC_OP_ADOXL', 'G_CC_OP_ADOXQ' ]:
        l.debug("cc_str: ADOX")
        return pc_actions_ADCX(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, False, platform=platform)
    if cc_str in [ 'G_CC_OP_ADCXL', 'G_CC_OP_ADCXQ' ]:
        l.debug("cc_str: ADCX")
        return pc_actions_ADCX(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, True, platform=platform)

    l.error("Unsupported cc_op %d in in pc_calculate_rdata_all_WRK", cc_op)
    raise SimCCallError("Unsupported cc_op in pc_calculate_rdata_all_WRK")

# This function returns all the data
def pc_calculate_rdata_all(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform=None):
    """Compute the full flags word for a thunk (cc_op, cc_dep1, cc_dep2, cc_ndep)."""
    rdata_all = pc_calculate_rdata_all_WRK(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform=platform)
    if isinstance(rdata_all, tuple):
        # Individual flag bits: pack them into a platform-sized flags word.
        return pc_make_rdata_if_necessary(data[platform]['size'], *rdata_all, platform=platform)
    else:
        # COPY op: already a packed flags word.
        return rdata_all

# This function takes a condition that is being checked (ie, zero bit), and basically
# returns that bit
def pc_calculate_condition(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep, platform=None):
    """Evaluate condition number *cond* against the flags thunk.

    Returns a platform-sized BV that is 1 iff the condition holds. Each
    condition pairs with its negation via the low bit of *cond* (`inv`).
    """
    rdata_all = pc_calculate_rdata_all_WRK(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform=platform)
    if isinstance(rdata_all, tuple):
        # We have the six individual flag bits; combine them directly.
        cf, pf, af, zf, sf, of = rdata_all
        v = op_concretize(cond)

        # Odd condition numbers are the negations of the preceding even ones.
        inv = v & 1
        l.debug("inv: %d", inv)

        if v in [ data[platform]['CondTypes']['CondO'], data[platform]['CondTypes']['CondNO'] ]:
            l.debug("CondO")
            #of = claripy.LShR(rdata, data[platform]['G_CC_SHIFT_O'])
            r = 1 & (inv ^ of)

        elif v in [ data[platform]['CondTypes']['CondZ'], data[platform]['CondTypes']['CondNZ'] ]:
            l.debug("CondZ")
            #zf = claripy.LShR(rdata, data[platform]['G_CC_SHIFT_Z'])
            r = 1 & (inv ^ zf)

        elif v in [ data[platform]['CondTypes']['CondB'], data[platform]['CondTypes']['CondNB'] ]:
            l.debug("CondB")
            #cf = claripy.LShR(rdata, data[platform]['G_CC_SHIFT_C'])
            r = 1 & (inv ^ cf)

        elif v in [ data[platform]['CondTypes']['CondBE'], data[platform]['CondTypes']['CondNBE'] ]:
            l.debug("CondBE")
            #cf = claripy.LShR(rdata, data[platform]['G_CC_SHIFT_C'])
            #zf = claripy.LShR(rdata, data[platform]['G_CC_SHIFT_Z'])
            r = 1 & (inv ^ (cf | zf))

        elif v in [ data[platform]['CondTypes']['CondS'], data[platform]['CondTypes']['CondNS'] ]:
            l.debug("CondS")
            #sf = claripy.LShR(rdata, data[platform]['G_CC_SHIFT_S'])
            r = 1 & (inv ^ sf)

        elif v in [ data[platform]['CondTypes']['CondP'], data[platform]['CondTypes']['CondNP'] ]:
            l.debug("CondP")
            #pf = claripy.LShR(rdata, data[platform]['G_CC_SHIFT_P'])
            r = 1 & (inv ^ pf)

        elif v in [ data[platform]['CondTypes']['CondL'], data[platform]['CondTypes']['CondNL'] ]:
            l.debug("CondL")
            #sf = claripy.LShR(rdata, data[platform]['G_CC_SHIFT_S'])
            #of = claripy.LShR(rdata, data[platform]['G_CC_SHIFT_O'])
            r = 1 & (inv ^ (sf ^ of))

        elif v in [ data[platform]['CondTypes']['CondLE'], data[platform]['CondTypes']['CondNLE'] ]:
            l.debug("CondLE")
            #sf = claripy.LShR(rdata, data[platform]['G_CC_SHIFT_S'])
            #of = claripy.LShR(rdata, data[platform]['G_CC_SHIFT_O'])
            #zf = claripy.LShR(rdata, data[platform]['G_CC_SHIFT_Z'])
            r = 1 & (inv ^ ((sf ^ of) | zf))
        else:
            raise SimCCallError("Unrecognized condition in pc_calculate_condition. Panic.")

        # Widen the 1-bit result to the platform word size.
        return claripy.Concat(claripy.BVV(0, data[platform]['size']-1), r)
    else:
        # COPY case: rdata is a packed flags word; extract bits by shifting.
        rdata = rdata_all
        v = op_concretize(cond)
        inv = v & 1
        l.debug("inv: %d", inv)


        # HACK: placeholder branch for v == 0xe (jle); deliberately a no-op.
        if v == 0xe:
            # jle
            pass
        if v in [data[platform]['CondTypes']['CondO'], data[platform]['CondTypes']['CondNO']]:
            l.debug("CondO")
            of = claripy.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_O'])
            return 1 & (inv ^ of)

        if v in [data[platform]['CondTypes']['CondZ'], data[platform]['CondTypes']['CondNZ']]:
            l.debug("CondZ")
            zf = claripy.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_Z'])
            return 1 & (inv ^ zf)

        if v in [data[platform]['CondTypes']['CondB'], data[platform]['CondTypes']['CondNB']]:
            l.debug("CondB")
            cf = claripy.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_C'])
            return 1 & (inv ^ cf)

        if v in [data[platform]['CondTypes']['CondBE'], data[platform]['CondTypes']['CondNBE']]:
            l.debug("CondBE")
            cf = claripy.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_C'])
            zf = claripy.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_Z'])
            return 1 & (inv ^ (cf | zf))

        if v in [data[platform]['CondTypes']['CondS'], data[platform]['CondTypes']['CondNS']]:
            l.debug("CondS")
            sf = claripy.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_S'])
            return 1 & (inv ^ sf)

        if v in [data[platform]['CondTypes']['CondP'], data[platform]['CondTypes']['CondNP']]:
            l.debug("CondP")
            pf = claripy.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_P'])
            return 1 & (inv ^ pf)

        if v in [data[platform]['CondTypes']['CondL'], data[platform]['CondTypes']['CondNL']]:
            l.debug("CondL")
            sf = claripy.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_S'])
            of = claripy.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_O'])
            return 1 & (inv ^ (sf ^ of))

        if v in [data[platform]['CondTypes']['CondLE'], data[platform]['CondTypes']['CondNLE']]:
            l.debug("CondLE")
            sf = claripy.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_S'])
            of = claripy.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_O'])
            zf = claripy.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_Z'])
            return 1 & (inv ^ ((sf ^ of) | zf))

        l.error("Unsupported condition %d in in pc_calculate_condition", v)
        raise SimCCallError("Unrecognized condition in pc_calculate_condition")

#
# Simplified CCalls
#

# Simplified CCalls (whose names look like `pc_actions_<operation>_<condition>`) are a bunch of methods that generate
# straight-forward ASTs based on the operation and the condition, instead of blindly following the way that a CPU does
# the conditional flags calculation and generating messy and meaningless ASTs. It allows us to have a meaningful AST
# for each conditional flag, which greatly helps static analysis (like VSA).
# TODO: Implement the missing ones

# General ops
#
# Each pc_actions_op_<OP> helper reconstructs the result value of the deferred
# flags operation <OP> from its recorded operands. `cc_ndep` is accepted only
# to keep a uniform signature; these simple ops do not use it.
def pc_actions_op_SUB(arg_l, arg_r, cc_ndep):
    return arg_l - arg_r

def pc_actions_op_DEC(arg_l, arg_r, cc_ndep):
    return arg_l - 1

def pc_actions_op_INC(arg_l, arg_r, cc_ndep):
    return arg_l + 1

def pc_actions_op_SHR(arg_l, arg_r, cc_ndep):
    return arg_l >> arg_r

def pc_actions_op_SHL(arg_l, arg_r, cc_ndep):
    return arg_l << arg_r

def pc_actions_op_ADD(arg_l, arg_r, cc_ndep):
    return arg_l + arg_r

def pc_actions_op_LOGIC(arg_l, arg_r, cc_ndep):
    return arg_l

# General conditions
#
# Each pc_actions_cond_<COND> helper turns the reconstructed result expression
# into a 1-bit flag for condition <COND>.
def pc_actions_cond_CondZ(state, cc_expr):
    return _cond_flag(cc_expr == 0)

def pc_actions_cond_CondNZ(state, cc_expr):
    return _cond_flag(cc_expr != 0)

def pc_actions_cond_CondS(state, cc_expr):
    return _cond_flag(claripy.SLT(cc_expr, 0))

def pc_actions_cond_CondB(state, cc_expr):
    # NOTE(review): ULT(x, 0) can never be true (nothing is unsigned-below zero);
    # preserved as-is -- specialized pc_actions_SUB_CondB handles the common case.
    return _cond_flag(claripy.ULT(cc_expr, 0))

def pc_actions_cond_CondBE(state, cc_expr):
    return _cond_flag(claripy.ULE(cc_expr, 0))

def pc_actions_cond_CondNBE(state, cc_expr):
    return _cond_flag(claripy.UGT(cc_expr, 0))

def pc_actions_cond_CondL(state, cc_expr):
    return _cond_flag(claripy.SLT(cc_expr, 0))

def pc_actions_cond_CondLE(state, cc_expr):
    return _cond_flag(claripy.SLE(cc_expr, 0))

def pc_actions_cond_CondNLE(state, cc_expr):
    return _cond_flag(claripy.SGT(cc_expr, 0))


# Specialized versions of (op,cond) to make claripy happy
#
# These express the condition directly on the SUB operands, producing cleaner
# ASTs (e.g. a direct unsigned/signed comparison) than op+cond composition.
def pc_actions_SUB_CondZ(state, arg_l, arg_r, cc_ndep):
    return _cond_flag(arg_l == arg_r)

def pc_actions_SUB_CondNZ(state, arg_l, arg_r, cc_ndep):
    return _cond_flag(arg_l != arg_r)

def pc_actions_SUB_CondB(state, arg_l, arg_r, cc_ndep):
    return _cond_flag(claripy.ULT(arg_l, arg_r))

def pc_actions_SUB_CondBE(state, arg_l, arg_r, cc_ndep):
    return _cond_flag(claripy.ULE(arg_l, arg_r))

def pc_actions_SUB_CondNBE(state, arg_l, arg_r, cc_ndep):
    return _cond_flag(claripy.UGT(arg_l, arg_r))

def pc_actions_SUB_CondL(state, arg_l, arg_r, cc_ndep):
    return _cond_flag(claripy.SLT(arg_l, arg_r))

def pc_actions_SUB_CondLE(state, arg_l, arg_r, cc_ndep):
    return _cond_flag(claripy.SLE(arg_l, arg_r))

def pc_actions_SUB_CondNLE(state, arg_l, arg_r, cc_ndep):
    return _cond_flag(claripy.SGT(arg_l, arg_r))


def pc_calculate_condition_simple(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep, platform=None):
    """
    A simplified version of pc_calculate_condition(). Please refer to the documentation of Simplified CCalls above.

    Dispatches to a specialized pc_actions_<OP>_<COND> helper when one exists,
    otherwise composes pc_actions_op_<OP> with pc_actions_cond_<COND>.

    Limitation: symbolic flags are not supported for now.

    :raises SimCCallError: For G_CC_OP_COPY/G_CC_OP_NUMBER or an unsupported (op, cond) pair.
    :raises KeyError:      (from data_inverted lookup) when the op has no simplified form;
                           callers such as amd64g_calculate_condition catch this and fall back.
    """

    # Extract the operation
    v = op_concretize(cond)
    cc_op = op_concretize(cc_op)

    if cc_op == data[platform]['OpTypes']['G_CC_OP_COPY']:
        raise SimCCallError("G_CC_OP_COPY is not supported in pc_calculate_condition_simple(). Consider implementing.")
    if cc_op == data[platform]['OpTypes']['G_CC_OP_NUMBER']:
        raise SimCCallError("G_CC_OP_NUMBER is not supported in pc_calculate_condition_simple(). Consider implementing.")

    op = data_inverted[platform]['OpTypes'][cc_op]
    nbits = _get_nbits(op)
    op = op[8 : -1]  # strip the 'G_CC_OP_' prefix and the size suffix character

    # Extract the condition
    cond = None
    # TODO: Convert it to a table-lookup later
    for key, cond_val in data[platform]['CondTypes'].items():
        if cond_val == v:
            cond = key
            break

    # Truncate the dependencies to the operation's actual width.
    cc_dep1_nbits = cc_dep1[nbits-1:0]
    cc_dep2_nbits = cc_dep2[nbits-1:0]

    # check for a specialized version first
    funcname = "pc_actions_%s_%s" % (op, cond)
    if funcname in globals():
        r = globals()[funcname](state, cc_dep1_nbits, cc_dep2_nbits, cc_ndep)
    else:
        op_funcname = "pc_actions_op_%s" % op
        cond_funcname = "pc_actions_cond_%s" % cond
        if op_funcname in globals() and cond_funcname in globals():
            cc_expr = globals()[op_funcname](cc_dep1_nbits, cc_dep2_nbits, cc_ndep)
            r = globals()[cond_funcname](state, cc_expr)
        else:
            l.warning('Operation %s with condition %s is not supported in pc_calculate_condition_simple(). Consider implementing.', op, cond)
            raise SimCCallError('Operation %s with condition %s not found.' % (op, cond))

    # Widen the 1-bit result to the platform word size.
    return claripy.Concat(claripy.BVV(0, data[platform]['size'] - 1), r)


def pc_calculate_rdata_c(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform=None):
    """
    Compute only the carry flag for the pending flags operation, widened to the
    platform word size. COPY and LOGIC* ops are short-circuited; everything else
    goes through pc_calculate_rdata_all_WRK.
    """
    cc_op = op_concretize(cc_op)

    if cc_op == data[platform]['OpTypes']['G_CC_OP_COPY']:
        return claripy.LShR(cc_dep1, data[platform]['CondBitOffsets']['G_CC_SHIFT_C']) & 1 # TODO: actual constraints
    elif cc_op in ( data[platform]['OpTypes']['G_CC_OP_LOGICQ'], data[platform]['OpTypes']['G_CC_OP_LOGICL'], data[platform]['OpTypes']['G_CC_OP_LOGICW'], data[platform]['OpTypes']['G_CC_OP_LOGICB'] ):
        # Logic ops always clear CF.
        return claripy.BVV(0, data[platform]['size']) # TODO: actual constraints

    rdata_all = pc_calculate_rdata_all_WRK(state, cc_op,cc_dep1,cc_dep2,cc_ndep, platform=platform)

    if isinstance(rdata_all, tuple):
        cf, pf, af, zf, sf, of = rdata_all
        return claripy.Concat(claripy.BVV(0, data[platform]['size']-1), cf & 1)
    else:
        return claripy.LShR(rdata_all, data[platform]['CondBitOffsets']['G_CC_SHIFT_C']) & 1

def generic_rotate_with_carry(state, left, arg, rot_amt, carry_bit_in, sz):
    """
    Shared helper for x86/AMD64 RCL/RCR: rotate `arg` (low sz*8 bits) together
    with the incoming carry bit.

    :param left:         True for RCL (rotate left), False for RCR.
    :param arg:          The value being rotated (full register width).
    :param rot_amt:      The rotation amount (same width as `arg`).
    :param carry_bit_in: 1-bit incoming carry.
    :param sz:           Concrete BVV with the operand size in bytes.
    :return:             (carry_bit_out, overflow_bit_out, arg_out) where arg_out
                         is zero-extended back to the width of `arg`.
    :raises SimError:    If `sz` is symbolic or larger than the provided word size.
    """
    # returns cf, of, result
    # make sure sz is not symbolic
    if sz.op != 'BVV':
        raise SimError('Hit a symbolic "sz" in an x86 rotate with carry instruction. Panic.')

    # convert sz to concrete value
    sz = sz.args[0]
    bits = sz * 8
    bits_in = len(arg)

    # construct bitvec to use for rotation amount - 9/17/33/65 bits
    if bits > len(rot_amt):
        raise SimError("Got a rotate instruction for data larger than the provided word size. Panic.")

    if bits == len(rot_amt):
        sized_amt = (rot_amt & (bits_in - 1)).zero_extend(1)
    else:
        sized_amt = (rot_amt & (bits_in - 1))[bits:0]

    assert len(sized_amt) == bits + 1

    # construct bitvec to use for rotating value - 9/17/33/65 bits
    sized_arg_in = arg[bits-1:0]
    rotatable_in = carry_bit_in.concat(sized_arg_in)

    # compute and extract
    op = claripy.RotateLeft if left else claripy.RotateRight
    rotatable_out = op(rotatable_in, sized_amt)
    sized_arg_out = rotatable_out[bits-1:0]
    carry_bit_out = rotatable_out[bits]
    arg_out = sized_arg_out.zero_extend(bits_in - bits)

    # OF is defined (for 1-bit rotates) as carry ^ msb for left rotates, and
    # msb ^ msb-1 for right rotates.
    if left:
        overflow_bit_out = carry_bit_out ^ sized_arg_out[bits-1]
    else:
        overflow_bit_out = sized_arg_out[bits-1] ^ sized_arg_out[bits-2]

    # construct final answer
    return carry_bit_out, overflow_bit_out, arg_out

###########################
### AMD64-specific ones ###
###########################
# https://github.com/angr/vex/blob/master/priv/guest_amd64_helpers.c#L2272
# NOTE(review): this definition (and amd64g_create_mxcsr below) is shadowed by a
# later definition of the same name further down in this module, so it is dead
# at import time; kept (with the .is_true bug fixed) pending deduplication.
def amd64g_check_ldmxcsr(state, mxcsr):
    # /* Decide on a rounding mode. mxcsr[14:13] holds it. */
    # /* NOTE, encoded exactly as per enum IRRoundingMode. */
    rmode = (mxcsr >> 13) & 3

    # /* Detect any required emulation warnings. */
    ew = EmNote_NONE

    # BUGFIX: `.is_true` was previously accessed without calling it; a bound
    # method is always truthy, so the first branch was unconditionally taken.
    if ((mxcsr & 0x1F80) != 0x1F80).is_true():
        # /* unmasked exceptions! */
        ew = EmWarn_X86_sseExns

    elif ((mxcsr & (1 << 15)) != 0).is_true():
        # /* FZ is set */
        ew = EmWarn_X86_fz
    elif ((mxcsr & (1 << 6)) != 0).is_true():
        # /* DAZ is set */
        ew = EmWarn_X86_daz

    return (ew << 32) | rmode


# https://github.com/angr/vex/blob/master/priv/guest_amd64_helpers.c#L2304
def amd64g_create_mxcsr(state, sseround):
    """Build an MXCSR value with all exceptions masked and the given rounding mode."""
    sseround &= 3
    return 0x1F80 | (sseround << 13)


# https://github.com/angr/vex/blob/master/priv/guest_amd64_helpers.c#L2316
def amd64g_check_fldcw(state, fpucw):
    """Validate an x87 control word being loaded; returns (emwarn << 32) | rmode."""
    rmode = (fpucw >> 10) & 3
    ew = EmNote_NONE
    # BUGFIX: `.is_true` was previously accessed without calling it; a bound
    # method is always truthy, so EmWarn_X86_x87exns was always reported.
    if ((fpucw & 0x3f) != 0x3f).is_true():
        # unmasked exceptions
        ew = EmWarn_X86_x87exns
    elif (((fpucw >> 8) & 3) != 3).is_true():
        ew = EmWarn_X86_x87precision
    return (ew << 32) | rmode


# https://github.com/angr/vex/blob/master/priv/guest_amd64_helpers.c#L2342
def amd64g_create_fpucw(state, fpround):
    """Build an x87 control word with all exceptions masked and the given rounding mode."""
    fpround &= 3
    return 0x037f | (fpround << 10)


def amd64g_calculate_RCL(state, arg, rot_amt, eflags_in, sz):
    """
    AMD64 RCL ccall. A negative concrete `sz` requests the updated EFLAGS
    instead of the rotated value.
    """
    if sz.op != 'BVV':
        raise SimError('Hit a symbolic "sz" in an x86 rotate with carry instruction. Panic.')

    want_flags = claripy.SLT(sz, 0).is_true()
    if want_flags: sz = -sz
    carry_bit_in = eflags_in[data['AMD64']['CondBitOffsets']['G_CC_SHIFT_C']]
    carry_bit_out, overflow_bit_out, arg_out = generic_rotate_with_carry(state, True, arg, rot_amt, carry_bit_in, sz)

    if want_flags:
        cf = carry_bit_out.zero_extend(63)
        of = overflow_bit_out.zero_extend(63)
        eflags_out = eflags_in
        eflags_out &= ~(data['AMD64']['CondBitMasks']['G_CC_MASK_C'] | data['AMD64']['CondBitMasks']['G_CC_MASK_O'])
        eflags_out |= (cf << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_C']) | \
                      (of << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_O'])
        return eflags_out
    else:
        return arg_out

def amd64g_calculate_RCR(state, arg, rot_amt, eflags_in, sz):
    """
    AMD64 RCR ccall. A negative concrete `sz` requests the updated EFLAGS
    instead of the rotated value.
    """
    if sz.op != 'BVV':
        raise SimError('Hit a symbolic "sz" in an x86 rotate with carry instruction. Panic.')

    want_flags = claripy.SLT(sz, 0).is_true()
    if want_flags: sz = -sz
    carry_bit_in = eflags_in[data['AMD64']['CondBitOffsets']['G_CC_SHIFT_C']]
    carry_bit_out, overflow_bit_out, arg_out = generic_rotate_with_carry(state, False, arg, rot_amt, carry_bit_in, sz)

    if want_flags:
        cf = carry_bit_out.zero_extend(63)
        of = overflow_bit_out.zero_extend(63)
        eflags_out = eflags_in
        eflags_out &= ~(data['AMD64']['CondBitMasks']['G_CC_MASK_C'] | data['AMD64']['CondBitMasks']['G_CC_MASK_O'])
        eflags_out |= (cf << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_C']) | \
                      (of << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_O'])
        return eflags_out
    else:
        return arg_out

def amd64g_calculate_mmx_pmaddwd(_state, xx, yy):
    """
    MMX PMADDWD: multiply corresponding signed 16-bit lanes of xx and yy and
    horizontally add adjacent 32-bit products (chop() yields MSB lane first).
    """
    xx_3, xx_2, xx_1, xx_0 = xx.chop(16)
    yy_3, yy_2, yy_1, yy_0 = yy.chop(16)
    xx_3 = xx_3.sign_extend(16)
    xx_2 = xx_2.sign_extend(16)
    xx_1 = xx_1.sign_extend(16)
    xx_0 = xx_0.sign_extend(16)
    yy_3 = yy_3.sign_extend(16)
    yy_2 = yy_2.sign_extend(16)
    yy_1 = yy_1.sign_extend(16)
    yy_0 = yy_0.sign_extend(16)

    res_1 = xx_3 * yy_3 + xx_2 * yy_2
    res_0 = xx_1 * yy_1 + xx_0 * yy_0

    return claripy.Concat(res_1, res_0)

def amd64g_calculate_condition(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep):
    """Evaluate an AMD64 condition code; prefers the simplified path when enabled."""
    if USE_SIMPLIFIED_CCALLS in state.options:
        try:
            return pc_calculate_condition_simple(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='AMD64')
        except KeyError:
            # No simplified form for this op; fall through to the full emulation.
            pass
    return pc_calculate_condition(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='AMD64')

def amd64g_calculate_rflags_all(state, cc_op, cc_dep1, cc_dep2, cc_ndep):
    """Materialize the full AMD64 RFLAGS value from the deferred flags thunk."""
    return pc_calculate_rdata_all(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='AMD64')

def amd64g_calculate_rflags_c(state, cc_op, cc_dep1, cc_dep2, cc_ndep):
    """Materialize only the AMD64 carry flag from the deferred flags thunk."""
    return pc_calculate_rdata_c(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='AMD64')

###########################
### X86-specific ones  ###
###########################

def x86g_calculate_RCL(state, arg, rot_amt, eflags_in, sz):
    """X86 RCL ccall; returns the updated EFLAGS concatenated with the rotated value."""
    carry_bit_in = eflags_in[data['X86']['CondBitOffsets']['G_CC_SHIFT_C']]
    carry_bit_out, overflow_bit_out, arg_out = generic_rotate_with_carry(state, True, arg, rot_amt, carry_bit_in, sz)

    cf = carry_bit_out.zero_extend(31)
    of = overflow_bit_out.zero_extend(31)
    eflags_out = eflags_in
    eflags_out &= ~(data['X86']['CondBitMasks']['G_CC_MASK_C'] | data['X86']['CondBitMasks']['G_CC_MASK_O'])
    eflags_out |= (cf << data['X86']['CondBitOffsets']['G_CC_SHIFT_C']) | \
                  (of << data['X86']['CondBitOffsets']['G_CC_SHIFT_O'])

    return eflags_out.concat(arg_out)

def x86g_calculate_RCR(state, arg, rot_amt, eflags_in, sz):
    """X86 RCR ccall; returns the updated EFLAGS concatenated with the rotated value."""
    carry_bit_in = eflags_in[data['X86']['CondBitOffsets']['G_CC_SHIFT_C']]
    carry_bit_out, overflow_bit_out, arg_out = generic_rotate_with_carry(state, False, arg, rot_amt, carry_bit_in, sz)

    cf = carry_bit_out.zero_extend(31)
    of = overflow_bit_out.zero_extend(31)
    eflags_out = eflags_in
    eflags_out &= ~(data['X86']['CondBitMasks']['G_CC_MASK_C'] | data['X86']['CondBitMasks']['G_CC_MASK_O'])
    eflags_out |= (cf << data['X86']['CondBitOffsets']['G_CC_SHIFT_C']) | \
                  (of << data['X86']['CondBitOffsets']['G_CC_SHIFT_O'])

    return eflags_out.concat(arg_out)

def x86g_calculate_condition(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep):
    """Evaluate an X86 condition code; uses the simplified path when enabled."""
    if USE_SIMPLIFIED_CCALLS in state.options:
        return pc_calculate_condition_simple(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='X86')
    else:
        return pc_calculate_condition(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='X86')

def x86g_calculate_eflags_all(state, cc_op, cc_dep1, cc_dep2, cc_ndep):
    """Materialize the full X86 EFLAGS value from the deferred flags thunk."""
    return pc_calculate_rdata_all(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='X86')

def x86g_calculate_eflags_c(state, cc_op, cc_dep1, cc_dep2, cc_ndep):
    """Materialize only the X86 carry flag from the deferred flags thunk."""
    return pc_calculate_rdata_c(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='X86')

def x86g_check_fldcw(state, fpucw):
    """Extract the rounding mode from an x87 control word (no emulation warnings on X86)."""
    return ((fpucw >> 10) & 3).zero_extend(32)

def x86g_create_fpucw(state, fpround):
    """Build an x87 control word with all exceptions masked and the given rounding mode."""
    return 0x037f | ((fpround & 3) << 10)

def x86g_calculate_daa_das_aaa_aas(state, flags_and_AX, opcode):
    """
    Emulate the x86 BCD-adjust instructions DAA (0x27), DAS (0x2F), AAA (0x37)
    and AAS (0x3F).

    :param flags_and_AX: 32-bit value with the flags in bits 16+ (at their
                         G_CC_SHIFT_* offsets) and AX in the low 16 bits.
    :param opcode:       Concrete BVV holding the instruction opcode.
    :return:             The updated flags_and_AX in the same layout.
    """
    assert len(flags_and_AX) == 32
    assert opcode.op == 'BVV'
    opcode = opcode.args[0]

    # Unpack the individual flags (each widened to 32 bits for arithmetic).
    r_O = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_O'] + 16].zero_extend(31)
    r_S = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_S'] + 16].zero_extend(31)
    r_Z = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_Z'] + 16].zero_extend(31)
    r_A = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_A'] + 16].zero_extend(31)
    r_C = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_C'] + 16].zero_extend(31)
    r_P = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_P'] + 16].zero_extend(31)

    r_AL = (flags_and_AX >> 0) & 0xFF
    r_AH = (flags_and_AX >> 8) & 0xFF

    zero = claripy.BVV(0, 32)
    one = claripy.BVV(1, 32)

    if opcode == 0x27: # DAA
        old_AL = r_AL
        old_C = r_C

        condition = claripy.Or((r_AL & 0xF) > 9, r_A == 1)
        r_AL = claripy.If(condition, r_AL + 6, old_AL)
        r_C = claripy.If(condition, claripy.If(r_AL >= 0x100, one, old_C), zero)
        r_A = claripy.If(condition, one, zero)

        condition = claripy.Or(old_AL > 0x99, old_C == 1)
        r_AL = claripy.If(condition, r_AL + 0x60, r_AL)
        r_C = claripy.If(condition, one, zero)

        r_AL = r_AL&0xFF
        r_O = zero
        r_S = claripy.If((r_AL & 0x80) != 0, one, zero)
        r_Z = claripy.If(r_AL == 0, one, zero)
        r_P = calc_paritybit(r_AL).zero_extend(31)

    elif opcode == 0x2F: # DAS
        old_AL = r_AL
        old_C = r_C

        condition = claripy.Or((r_AL & 0xF) > 9, r_A == 1)
        r_AL = claripy.If(condition, r_AL - 6, old_AL)
        r_C = claripy.If(condition, claripy.If(r_AL < 6, one, zero), zero)
        r_A = claripy.If(condition, one, zero)

        condition = claripy.Or(old_AL > 0x99, old_C == 1)
        r_AL = claripy.If(condition, r_AL - 0x60, r_AL)
        r_C = claripy.If(condition, one, zero)

        r_AL &= 0xFF
        r_O = zero
        r_S = claripy.If((r_AL & 0x80) != 0, one, zero)
        r_Z = claripy.If(r_AL == 0, one, zero)
        r_P = calc_paritybit(r_AL).zero_extend(31)

    elif opcode == 0x37: # AAA
        nudge = r_AL > 0xF9
        condition = claripy.Or((r_AL & 0xF) > 9, r_A == 1)
        r_AL = claripy.If(condition, (r_AL + 6) & 0xF, r_AL & 0xF)
        r_AH = claripy.If(condition, claripy.If(nudge, r_AH + 2, r_AH + 1), r_AH)
        r_A = claripy.If(condition, one, zero)
        r_C = claripy.If(condition, one, zero)
        r_O = r_S = r_Z = r_P = 0
    elif opcode == 0x3F: # AAS
        nudge = r_AL < 0x06
        condition = claripy.Or((r_AL & 0xF) > 9, r_A == 1)
        r_AL = claripy.If(condition, (r_AL - 6) & 0xF, r_AL & 0xF)
        r_AH = claripy.If(condition, claripy.If(nudge, r_AH - 2, r_AH - 1), r_AH)
        r_A = claripy.If(condition, one, zero)
        r_C = claripy.If(condition, one, zero)
        r_O = r_S = r_Z = r_P = 0

    # Repack the flags and AX into the original layout.
    result = ( (r_O & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_O']) ) \
           | ( (r_S & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_S']) ) \
           | ( (r_Z & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_Z']) ) \
           | ( (r_A & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_A']) ) \
           | ( (r_C & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_C']) ) \
           | ( (r_P & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_P']) ) \
           | ( (r_AH & 0xFF) << 8 ) \
           | ( (r_AL & 0xFF) << 0 )
    return result

def x86g_calculate_aad_aam(state, flags_and_AX, opcode):
    """
    Emulate x86 AAM (0xD4) and AAD (0xD5) with the default base of 10.
    Layout of `flags_and_AX` matches x86g_calculate_daa_das_aaa_aas.
    """
    assert len(flags_and_AX) == 32
    assert opcode.op == 'BVV'
    opcode = opcode.args[0]

    r_AL = (flags_and_AX >> 0) & 0xFF
    r_AH = (flags_and_AX >> 8) & 0xFF

    if opcode == 0xD4: # AAM
        r_AH = r_AL // 10
        r_AL = r_AL % 10
    elif opcode == 0xD5: # AAD
        r_AL = ((r_AH * 10) + r_AL) & 0xff
        r_AH = claripy.BVV(0, 32)
    else:
        raise SimCCallError("Unknown opcode %#x in AAD/AAM ccall" % opcode)

    # SF/ZF/PF are defined from the new AL; OF/CF/AF are cleared.
    r_O = claripy.BVV(0, 32)
    r_C = claripy.BVV(0, 32)
    r_A = claripy.BVV(0, 32)
    r_S = r_AL[7].zero_extend(31)
    r_Z = claripy.If(r_AL == 0, claripy.BVV(1, 32), claripy.BVV(0, 32))
    r_P = calc_paritybit(r_AL).zero_extend(31)

    result = ( (r_O & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_O']) ) \
           | ( (r_S & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_S']) ) \
           | ( (r_Z & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_Z']) ) \
           | ( (r_A & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_A']) ) \
           | ( (r_C & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_C']) ) \
           | ( (r_P & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_P']) ) \
           | ( (r_AH & 0xFF) << 8 ) \
           | ( (r_AL & 0xFF) << 0 )
    return result

#
# x86 segment selection
#

# Reference for the GDT entry layout
# http://wiki.osdev.org/Global_Descriptor_Table
def get_segdescr_base(state, descriptor):
    """Extract the 32-bit base address from a segment descriptor."""
    lo = descriptor[31:16]
    mid = descriptor[39:32]
    hi = descriptor[63:56]
    return claripy.Concat(hi, mid, lo)

def get_segdescr_limit(state, descriptor):
    """Extract the segment limit, scaling by 4KiB pages when granularity is set."""
    granularity = descriptor[55]
    lo = descriptor[15:0]
    hi = descriptor[51:48]
    limit = claripy.Concat(hi, lo).zero_extend(12)
    if (granularity == 0).is_true():
        return limit
    else:
        return (limit << 12) | 0xfff

def x86g_use_seg_selector(state, ldt, gdt, seg_selector, virtual_addr):
    """
    Translate a (segment selector, virtual address) pair to a linear address by
    walking the GDT or LDT. Returns a 64-bit value; per the VEX contract, a
    value with bit 32 set signals failure.
    """
    # TODO Read/write/exec bit handling
    def bad(msg):
        if msg:
            l.warning("x86g_use_seg_selector: %s", msg)
        # NOTE(review): 1 << 32 does not fit in a 32-bit BVV -- this looks like it
        # should be a 64-bit failure marker (bit 32 set); confirm against VEX.
        return claripy.BVV(1 << 32, 32).zero_extend(32)

    if (seg_selector & ~0xFFFF != 0).is_true():
        return bad("invalid selector (" + str(seg_selector) + ")")

    if virtual_addr.length == 16:
        virtual_addr = virtual_addr.zero_extend(16)

    # are we in real mode?
    if state.arch.vex_archinfo['x86_cr0'] & 1 == 0:
        return ((seg_selector << 4) + virtual_addr).zero_extend(32)

    seg_selector &= 0x0000FFFF

    segment_selector_val = seg_selector >> 3

    if state.project.simos.name == "Win32" and (segment_selector_val == 0x6).is_true() and state.project.concrete_target is not None:
        return bad("angr doesn't support Windows Heaven's gate calls http://rce.co/knockin-on-heavens-gate-dynamic-processor-mode-switching/ \n"
                   "Please use the native 32 bit libs (not WoW64) or implement a simprocedure to avoid executing these instructions"
                   )


    # RPL=11 check
    #if state.solver.is_true((seg_selector & 3) != 3):
    #    return bad()

    tiBit = (seg_selector >> 2) & 1
    if (tiBit == 0).is_true():
        # GDT access
        gdt_value = state.solver.eval_one(gdt)
        if gdt_value == 0:
            return ((seg_selector << 16) + virtual_addr).zero_extend(32)

        seg_selector >>= 3 # bit 3 to 15 are the index in the table
        seg_selector = seg_selector.zero_extend(32)

        gdt_limit = gdt[15:0]
        if (seg_selector >= gdt_limit.zero_extend(48)).is_true():
            return bad("index out of range")

        gdt_base = gdt[47:16]
        gdt_base_value = state.solver.eval_one(gdt_base)
        descriptor = state.memory.load(gdt_base_value + seg_selector * 8, 8, endness='Iend_LE')
    else:
        # LDT access
        ldt_value = state.solver.eval_one(ldt)
        if ldt_value == 0:
            return ((seg_selector << 16) + virtual_addr).zero_extend(32)

        seg_selector >>= 3 # bit 3 to 15 are the index in the table
        seg_selector = seg_selector.zero_extend(32)

        ldt_limit = ldt[15:0]
        if (seg_selector >= ldt_limit.zero_extend(48)).is_true():
            return bad("index out of range")

        ldt_base = ldt[47:16]
        ldt_base_value = state.solver.eval_one(ldt_base)
        # FIX: previously ldt_base was concretized a second time into ldt_value;
        # reuse the value we already have, mirroring the GDT branch above.
        descriptor = state.memory.load(ldt_base_value + seg_selector * 8, 8, endness='Iend_LE')

    present = descriptor[47]
    if (present == 0).is_true():
        return bad("present bit set to 0")

    base = get_segdescr_base(state, descriptor)
    limit = get_segdescr_limit(state, descriptor)

    # When a concrete target is set and memory is read directly from the process sometimes a negative offset
    # from a segment register is used
    # if state.solver.is_true(virtual_addr >= limit) and state.project.concrete_target is None:
    #     return bad("virtual_addr >= limit")

    r = (base + virtual_addr).zero_extend(32)
    l.debug("x86g_use_seg_selector: addr=%s", str(r))

    return r

#
# other amd64 craziness
#

# VEX emulation-note / emulation-warning codes (see VEX's EmNote enum).
EmNote_NONE = 0
EmWarn_X86_x87exns = 1
EmWarn_X86_x87precision = 2
EmWarn_X86_sseExns = 3
EmWarn_X86_fz = 4
EmWarn_X86_daz = 5
EmWarn_X86_acFlag = 6
EmWarn_PPCexns = 7
EmWarn_PPC64_redir_overflow = 8
EmWarn_PPC64_redir_underflow = 9
EmWarn_S390X_fpext_rounding = 10
EmWarn_S390X_invalid_rounding = 11
EmFail_S390X_stfle = 12
EmFail_S390X_stckf = 13
EmFail_S390X_ecag = 14
EmFail_S390X_pfpo = 15
EmFail_S390X_DFP_insn = 16
EmFail_S390X_fpext = 17
EmFail_S390X_invalid_PFPO_rounding_mode = 18
EmFail_S390X_invalid_PFPO_function = 19


# NOTE(review): the two definitions below shadow the earlier ones of the same
# names; these symbolic (claripy.If-based) versions are the ones actually used.
def amd64g_create_mxcsr(state, sseround):
    """Build an MXCSR value with all exceptions masked and the given rounding mode."""
    return 0x1F80 | ((sseround & 3) << 13)

def amd64g_check_ldmxcsr(state, mxcsr):
    """Symbolic-friendly LDMXCSR check: returns (emulation warning << 32) | rmode."""
    # rmode lives in mxcsr[14:13].
    rmode = claripy.LShR(mxcsr, 13) & 3

    ew = claripy.If(
        (mxcsr & 0x1F80) != 0x1F80,
        claripy.BVV(EmWarn_X86_sseExns, 64),
        claripy.If(
            mxcsr & (1<<15) != 0,
            claripy.BVV(EmWarn_X86_fz, 64),
            claripy.If(
                mxcsr & (1<<6) != 0,
                claripy.BVV(EmWarn_X86_daz, 64),
                claripy.BVV(EmNote_NONE, 64)
            )
        )
    )

    return (ew << 32) | rmode

#################
### ARM Flags ###
#################

# ARM condition codes (bits [31:28] of an instruction).
ARMCondEQ = 0 #   /* equal                         : Z=1 */
ARMCondNE = 1 #   /* not equal                     : Z=0 */
ARMCondHS = 2 #   /* >=u (higher or same)          : C=1 */
ARMCondLO = 3 #   /* <u  (lower)                   : C=0 */
ARMCondMI = 4 #   /* minus (negative)              : N=1 */
ARMCondPL = 5 #   /* plus (zero or +ve)            : N=0 */
ARMCondVS = 6 #   /* overflow                      : V=1 */
ARMCondVC = 7 #   /* no overflow                   : V=0 */
ARMCondHI = 8 #   /* >u   (higher)                 : C=1 && Z=0 */
ARMCondLS = 9 #   /* <=u  (lower or same)          : C=0 || Z=1 */
ARMCondGE = 10 #  /* >=s (signed greater or equal) : N=V */
ARMCondLT = 11 #  /* <s  (signed less than)        : N!=V */
ARMCondGT = 12 #  /* >s  (signed greater)          : Z=0 && N=V */
ARMCondLE = 13 #  /* <=s (signed less or equal)    : Z=1 || N!=V */
ARMCondAL = 14 #  /* always (unconditional)        : 1 */
ARMCondNV = 15 #  /* never (unconditional):        : 0 */

# ARM deferred-flags operations (what cc_dep1/2/3 mean for each).
ARMG_CC_OP_COPY = 0   # /* DEP1 = NZCV in 31:28, DEP2 = 0, DEP3 = 0 just copy DEP1 to output */
ARMG_CC_OP_ADD = 1    # /* DEP1 = argL (Rn) = DEP2 = argR (shifter_op) = DEP3 = 0 */
ARMG_CC_OP_SUB = 2    # /* DEP1 = argL (Rn) = DEP2 = argR (shifter_op) = DEP3 = 0 */
ARMG_CC_OP_ADC = 3    # /* DEP1 = argL (Rn) = DEP2 = arg2 (shifter_op) = DEP3 = oldC (in LSB) */
ARMG_CC_OP_SBB = 4    # /* DEP1 = argL (Rn) = DEP2 = arg2 (shifter_op) = DEP3 = oldC (in LSB) */
ARMG_CC_OP_LOGIC = 5  # /* DEP1 = result = DEP2 = shifter_carry_out (in LSB) = DEP3 = old V flag (in LSB) */
ARMG_CC_OP_MUL = 6    # /* DEP1 = result = DEP2 = 0 = DEP3 = oldC:old_V (in bits 1:0) */
ARMG_CC_OP_MULL = 7   # /* DEP1 = resLO32 = DEP2 = resHI32 = DEP3 = oldC:old_V (in bits 1:0) */
ARMG_CC_OP_NUMBER = 8

# Bit positions of the NZCVQ flags within the ARM CPSR.
ARMG_CC_SHIFT_N = 31
ARMG_CC_SHIFT_Z = 30
ARMG_CC_SHIFT_C = 29
ARMG_CC_SHIFT_V = 28
ARMG_CC_SHIFT_Q = 27

ARMG_NBITS = 32

def armg_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
    """
    Compute the ARM N (negative) flag for the deferred flags operation.

    :raises SimCCallError: If cc_op concretizes to an unknown operation.
    """
    concrete_op = op_concretize(cc_op)
    flag = None

    if concrete_op == ARMG_CC_OP_COPY:
        flag = claripy.LShR(cc_dep1, ARMG_CC_SHIFT_N) & 1
    elif concrete_op == ARMG_CC_OP_ADD:
        res = cc_dep1 + cc_dep2
        flag = claripy.LShR(res, 31)
    elif concrete_op == ARMG_CC_OP_SUB:
        res = cc_dep1 - cc_dep2
        flag = claripy.LShR(res, 31)
    elif concrete_op == ARMG_CC_OP_ADC:
        res = cc_dep1 + cc_dep2 + cc_dep3
        flag = claripy.LShR(res, 31)
    elif concrete_op == ARMG_CC_OP_SBB:
        res = cc_dep1 - cc_dep2 - (cc_dep3^1)
        flag = claripy.LShR(res, 31)
    elif concrete_op == ARMG_CC_OP_LOGIC:
        flag = claripy.LShR(cc_dep1, 31)
    elif concrete_op == ARMG_CC_OP_MUL:
        flag = claripy.LShR(cc_dep1, 31)
    elif concrete_op == ARMG_CC_OP_MULL:
        flag = claripy.LShR(cc_dep2, 31)

    if flag is not None: return flag
    l.error("Unknown cc_op %s (armg_calculate_flag_n)", cc_op)
    raise SimCCallError("Unknown cc_op %s" % cc_op)

def arm_zerobit(state, x):
    """Return a 32-bit value that is 1 iff x == 0."""
    return calc_zerobit(x).zero_extend(31)

def armg_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
    """
    Compute the ARM Z (zero) flag for the deferred flags operation.

    :raises SimCCallError: If cc_op concretizes to an unknown operation.
    """
    concrete_op = op_concretize(cc_op)
    flag = None

    if concrete_op == ARMG_CC_OP_COPY:
        flag = claripy.LShR(cc_dep1, ARMG_CC_SHIFT_Z) & 1
    elif concrete_op == ARMG_CC_OP_ADD:
        res = cc_dep1 + cc_dep2
        flag = arm_zerobit(state, res)
    elif concrete_op == ARMG_CC_OP_SUB:
        res = cc_dep1 - cc_dep2
        flag = arm_zerobit(state, res)
    elif concrete_op == ARMG_CC_OP_ADC:
        res = cc_dep1 + cc_dep2 + cc_dep3
        flag = arm_zerobit(state, res)
    elif concrete_op == ARMG_CC_OP_SBB:
        res = cc_dep1 - cc_dep2 - (cc_dep3^1)
        flag = arm_zerobit(state, res)
    elif concrete_op == ARMG_CC_OP_LOGIC:
        flag = arm_zerobit(state, cc_dep1)
    elif concrete_op == ARMG_CC_OP_MUL:
        flag = arm_zerobit(state, cc_dep1)
    elif concrete_op == ARMG_CC_OP_MULL:
        # 64-bit result is zero iff both halves are zero.
        flag = arm_zerobit(state, cc_dep1 | cc_dep2)

    if flag is not None: return flag

    l.error("Unknown cc_op %s (armg_calculate_flag_z)", concrete_op)
    raise SimCCallError("Unknown cc_op %s" % concrete_op)

def armg_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
    """
    Compute the ARM C (carry) flag for the deferred flags operation.

    :raises SimCCallError: If cc_op concretizes to an unknown operation.
    """
    concrete_op = op_concretize(cc_op)
    flag = None

    if concrete_op == ARMG_CC_OP_COPY:
        flag = claripy.LShR(cc_dep1, ARMG_CC_SHIFT_C) & 1
    elif concrete_op == ARMG_CC_OP_ADD:
        # Carry out of an add: the result wrapped below the first operand.
        res = cc_dep1 + cc_dep2
        flag = boolean_extend(claripy.ULT, res, cc_dep1, 32)
    elif concrete_op == ARMG_CC_OP_SUB:
        # ARM SUB sets C as NOT-borrow: argL >=u argR.
        flag = boolean_extend(claripy.UGE, cc_dep1, cc_dep2, 32)
    elif concrete_op == ARMG_CC_OP_ADC:
        # With carry-in set, a result equal to argL also means a wrap occurred.
        res = cc_dep1 + cc_dep2 + cc_dep3
        flag = claripy.If(cc_dep3 != 0, boolean_extend(claripy.ULE, res, cc_dep1, 32),
                          boolean_extend(claripy.ULT, res, cc_dep1, 32))
    elif concrete_op == ARMG_CC_OP_SBB:
        flag = claripy.If(cc_dep3 != 0, boolean_extend(claripy.UGE, cc_dep1, cc_dep2, 32),
                          boolean_extend(claripy.UGT, cc_dep1, cc_dep2, 32))
    elif concrete_op == ARMG_CC_OP_LOGIC:
        # For logic ops, DEP2 holds the shifter carry-out.
        flag = cc_dep2
    elif concrete_op == ARMG_CC_OP_MUL:
        # MUL/MULL keep the old C in bit 1 of DEP3.
        flag = (claripy.LShR(cc_dep3, 1)) & 1
    elif concrete_op == ARMG_CC_OP_MULL:
        flag = (claripy.LShR(cc_dep3, 1)) & 1

    if flag is not None: return flag

    l.error("Unknown cc_op %s (armg_calculate_flag_c)", cc_op)
    raise SimCCallError("Unknown cc_op %s" % cc_op)

def armg_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
    """
    Compute the ARM V (signed overflow) flag for the deferred flags operation.

    :raises SimCCallError: If cc_op concretizes to an unknown operation.
    """
    concrete_op = op_concretize(cc_op)
    flag = None

    if concrete_op == ARMG_CC_OP_COPY:
        flag = claripy.LShR(cc_dep1, ARMG_CC_SHIFT_V) & 1
    elif concrete_op == ARMG_CC_OP_ADD:
        # Signed overflow on add: result sign differs from both operand signs.
        res = cc_dep1 + cc_dep2
        v = ((res ^ cc_dep1) & (res ^ cc_dep2))
        flag = claripy.LShR(v, 31)
    elif concrete_op == ARMG_CC_OP_SUB:
        # Signed overflow on sub: operand signs differ and result sign differs from argL.
        res = cc_dep1 - cc_dep2
        v = ((cc_dep1 ^ cc_dep2) & (cc_dep1 ^ res))
        flag = claripy.LShR(v, 31)
    elif concrete_op == ARMG_CC_OP_ADC:
        res = cc_dep1 + cc_dep2 + cc_dep3
        v = ((res ^ cc_dep1) & (res ^ cc_dep2))
        flag = claripy.LShR(v, 31)
    elif concrete_op == ARMG_CC_OP_SBB:
        res = cc_dep1 - cc_dep2 - (cc_dep3^1)
        v = ((cc_dep1 ^ cc_dep2) & (cc_dep1 ^ res))
        flag = claripy.LShR(v, 31)
    elif concrete_op == ARMG_CC_OP_LOGIC:
        # Logic ops preserve V; DEP3 holds the old V flag.
        flag = cc_dep3
    elif concrete_op == ARMG_CC_OP_MUL:
        # MUL/MULL keep the old V in bit 0 of DEP3.
        flag = cc_dep3 & 1
    elif concrete_op == ARMG_CC_OP_MULL:
        flag = cc_dep3 & 1

    if flag is not None: return flag

    l.error("Unknown cc_op %s (armg_calculate_flag_v)", cc_op)
    raise SimCCallError("Unknown cc_op %s" % cc_op)

def armg_calculate_flags_nzcv(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
    """
    Assemble the packed NZCV flags word (flags at their ARMG_CC_SHIFT_* positions)
    from the four individual flag calculations.
    """
    # NOTE: adding constraints afterwards works here *only* because the constraints are actually useless, because we require
    # cc_op to be unique. If we didn't, we'd need to pass the constraints into any functions called after the constraints were
    # created.
    n = armg_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
    z = armg_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
    c = armg_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
    v = armg_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
    vec = [(ARMG_CC_SHIFT_N, claripy.Extract(0, 0, n)),
           (ARMG_CC_SHIFT_Z, claripy.Extract(0, 0, z)),
           (ARMG_CC_SHIFT_C, claripy.Extract(0, 0, c)),
           (ARMG_CC_SHIFT_V, claripy.Extract(0, 0, v))]
    return _concat_flags(ARMG_NBITS, vec)


def armg_calculate_condition(state, cond_n_op, cc_dep1, cc_dep2, cc_dep3):
    """
    Evaluate an ARM condition code. `cond_n_op` packs the condition in its upper
    bits (>> 4) and the deferred flags op in its low nibble; the low bit of the
    condition selects the inverted variant of each pair.

    :raises SimCCallError: If the condition concretizes to an unrecognized value.
    """
    concrete_cond_n_op = op_concretize(cond_n_op)

    cond = concrete_cond_n_op >> 4
    cc_op = concrete_cond_n_op & 0xF
    inv = cond & 1

    concrete_cond = op_concretize(cond)
    flag = None

    # NOTE: adding constraints afterwards works here *only* because the constraints are actually useless, because we require
    # cc_op to be unique. If we didn't, we'd need to pass the constraints into any functions called after the constraints were
    # created.

    if concrete_cond == ARMCondAL:
        flag = claripy.BVV(1, 32)
    elif concrete_cond in [ ARMCondEQ, ARMCondNE ]:
        zf = armg_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
        flag = inv ^ zf
    elif concrete_cond in [ ARMCondHS, ARMCondLO ]:
        cf = armg_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
        flag = inv ^ cf
    elif concrete_cond in [ ARMCondMI, ARMCondPL ]:
        nf = armg_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
        flag = inv ^ nf
    elif concrete_cond in [ ARMCondVS, ARMCondVC ]:
        vf = armg_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
        flag = inv ^ vf
    elif concrete_cond in [ ARMCondHI, ARMCondLS ]:
        # HI: C=1 && Z=0
        cf = armg_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
        zf = armg_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
        flag = inv ^ (cf & ~zf)
    elif concrete_cond in [ ARMCondGE, ARMCondLT ]:
        # GE: N == V
        nf = armg_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
        vf = armg_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
        flag = inv ^ (1 & ~(nf ^ vf))
    elif concrete_cond in [ ARMCondGT, ARMCondLE ]:
        # GT: Z == 0 && N == V
        nf = armg_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
        vf = armg_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
        zf = armg_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
        flag = inv ^ (1 & ~(zf | (nf ^ vf)))

    if flag is not None: return flag

    l.error("Unrecognized condition %d in armg_calculate_condition", concrete_cond)
    raise SimCCallError("Unrecognized condition %d in armg_calculate_condition" % concrete_cond)

# Bit positions of the NZCV flags within the AArch64 flags word.
ARM64G_CC_SHIFT_N = 31
ARM64G_CC_SHIFT_Z = 30
ARM64G_CC_SHIFT_C = 29
ARM64G_CC_SHIFT_V = 28

# AArch64 deferred-flags operations (what cc_dep1/2/3 mean for each).
ARM64G_CC_OP_COPY = 0     #/* DEP1 = NZCV in 31:28, DEP2 = 0, DEP3 = 0 just copy DEP1 to output */
ARM64G_CC_OP_ADD32 = 1    #/* DEP1 = argL (Rn), DEP2 = argR (shifter_op), DEP3 = 0 */
ARM64G_CC_OP_ADD64 = 2    #/* DEP1 = argL (Rn), DEP2 = argR (shifter_op), DEP3 = 0 */
ARM64G_CC_OP_SUB32 = 3    #/* DEP1 = argL (Rn), DEP2 = argR (shifter_op), DEP3 = 0 */
ARM64G_CC_OP_SUB64 = 4    #/* DEP1 = argL (Rn), DEP2 = argR (shifter_op), DEP3 = 0 */
ARM64G_CC_OP_ADC32 = 5    #/* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op), DEP3 = oldC (in LSB) */
ARM64G_CC_OP_ADC64 = 6    #/* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op), DEP3 = oldC (in LSB) */
ARM64G_CC_OP_SBC32 = 7    #/* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op), DEP3 = oldC (in LSB) */
ARM64G_CC_OP_SBC64 = 8    #/* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op), DEP3 = oldC (in LSB) */
ARM64G_CC_OP_LOGIC32 = 9  #/* DEP1 = result, DEP2 = 0, DEP3 = 0 */
ARM64G_CC_OP_LOGIC64 = 10 #/* DEP1 = result, DEP2 = 0, DEP3 = 0 */
ARM64G_CC_OP_NUMBER = 11  #

# AArch64 condition codes.
ARM64CondEQ = 0  #/* equal                         : Z=1 */
ARM64CondNE = 1  #/* not equal                     : Z=0 */
ARM64CondCS = 2  #/* >=u (higher or same) (aka HS) : C=1 */
ARM64CondCC = 3  #/* <u  (lower) (aka LO)          : C=0 */
ARM64CondMI = 4  #/* minus (negative)              : N=1 */
ARM64CondPL = 5  #/* plus (zero or +ve)            : N=0 */
ARM64CondVS = 6  #/* overflow                      : V=1 */
ARM64CondVC = 7  #/* no overflow                   : V=0 */
ARM64CondHI = 8  #/* >u   (higher)                 : C=1 && Z=0 */
ARM64CondLS = 9  #/* <=u  (lower or same)          : C=0 || Z=1 */
ARM64CondGE = 10 #/* >=s (signed greater or equal) : N=V */
ARM64CondLT = 11 #/* <s  (signed less than)        : N!=V */
ARM64CondGT = 12 #/* >s  (signed greater)          : Z=0 && N=V */
ARM64CondLE = 13 #/* <=s (signed less or equal)    : Z=1 || N!=V */
ARM64CondAL = 14 #/* always (unconditional)        : 1 */
ARM64CondNV = 15 #/* always (unconditional)        : 1 */

ARM64G_NBITS = 64

# Computes the AArch64 N flag. NOTE: this definition continues past the end of
# the visible chunk; the trailing expression is completed on the following line.
def arm64g_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
    concrete_op = op_concretize(cc_op)
    flag = None

    cc_dep1, cc_dep2, cc_dep3 = arm64g_32bit_truncate_operands(concrete_op, cc_dep1, cc_dep2, cc_dep3)

    if concrete_op == ARM64G_CC_OP_COPY:
        flag = claripy.LShR(cc_dep1,
ARM64G_CC_SHIFT_N) & 1 1649 elif concrete_op == ARM64G_CC_OP_ADD32: 1650 res = cc_dep1 + cc_dep2 1651 flag = claripy.LShR(res, 31) 1652 elif concrete_op == ARM64G_CC_OP_ADD64: 1653 res = cc_dep1 + cc_dep2 1654 flag = claripy.LShR(res, 63) 1655 elif concrete_op == ARM64G_CC_OP_SUB32: 1656 res = cc_dep1 - cc_dep2 1657 flag = claripy.LShR(res, 31) 1658 elif concrete_op == ARM64G_CC_OP_SUB64: 1659 res = cc_dep1 - cc_dep2 1660 flag = claripy.LShR(res, 63) 1661 elif concrete_op == ARM64G_CC_OP_ADC32: 1662 res = cc_dep1 + cc_dep2 + cc_dep3 1663 flag = claripy.LShR(res, 31) 1664 elif concrete_op == ARM64G_CC_OP_ADC64: 1665 res = cc_dep1 + cc_dep2 + cc_dep3 1666 flag = claripy.LShR(res, 63) 1667 elif concrete_op == ARM64G_CC_OP_SBC32: 1668 res = cc_dep1 - cc_dep2 - (cc_dep3^1) 1669 flag = claripy.LShR(res, 31) 1670 elif concrete_op == ARM64G_CC_OP_SBC64: 1671 res = cc_dep1 - cc_dep2 - (cc_dep3^1) 1672 flag = claripy.LShR(res, 63) 1673 elif concrete_op == ARM64G_CC_OP_LOGIC32: 1674 flag = claripy.LShR(cc_dep1, 31) 1675 elif concrete_op == ARM64G_CC_OP_LOGIC64: 1676 flag = claripy.LShR(cc_dep1, 63) 1677 1678 if flag is not None: 1679 if len(flag) == 32: 1680 flag = flag.zero_extend(32) 1681 return flag 1682 l.error("Unknown cc_op %s (arm64g_calculate_flag_n)", cc_op) 1683 raise SimCCallError("Unknown cc_op %s" % cc_op) 1684 1685 1686def arm64_zerobit(state, x): 1687 return calc_zerobit(x).zero_extend(63) 1688 1689 1690def u64_to_u32(n): 1691 return n[31:0] 1692 1693 1694def arm64g_32bit_truncate_operands(cc_op, cc_dep1, cc_dep2, cc_dep3): 1695 # Truncate operands if in 32-bit mode 1696 if cc_op in {ARM64G_CC_OP_ADD32, ARM64G_CC_OP_SUB32}: 1697 cc_dep1 = u64_to_u32(cc_dep1) 1698 cc_dep2 = u64_to_u32(cc_dep2) 1699 elif cc_op in {ARM64G_CC_OP_ADC32, ARM64G_CC_OP_SBC32}: 1700 cc_dep1 = u64_to_u32(cc_dep1) 1701 cc_dep2 = u64_to_u32(cc_dep2) 1702 cc_dep3 = u64_to_u32(cc_dep3) 1703 elif cc_op == ARM64G_CC_OP_LOGIC32: 1704 cc_dep1 = u64_to_u32(cc_dep1) 1705 return cc_dep1, cc_dep2, 
cc_dep3 1706 1707 1708def arm64g_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3): 1709 concrete_op = op_concretize(cc_op) 1710 flag = None 1711 1712 cc_dep1, cc_dep2, cc_dep3 = arm64g_32bit_truncate_operands(concrete_op, cc_dep1, cc_dep2, cc_dep3) 1713 1714 if concrete_op == ARM64G_CC_OP_COPY: 1715 flag = claripy.LShR(cc_dep1, ARM64G_CC_SHIFT_Z) & 1 1716 elif concrete_op in (ARM64G_CC_OP_ADD32, ARM64G_CC_OP_ADD64): 1717 res = cc_dep1 + cc_dep2 1718 flag = arm64_zerobit(state, res) 1719 elif concrete_op in (ARM64G_CC_OP_SUB32, ARM64G_CC_OP_SUB64): 1720 res = cc_dep1 - cc_dep2 1721 flag = arm64_zerobit(state, res) 1722 elif concrete_op in (ARM64G_CC_OP_ADC32, ARM64G_CC_OP_ADC64): 1723 res = cc_dep1 + cc_dep2 + cc_dep3 1724 flag = arm64_zerobit(state, res) 1725 elif concrete_op in (ARM64G_CC_OP_SBC32, ARM64G_CC_OP_SBC64): 1726 res = cc_dep1 - cc_dep2 - (cc_dep3^1) 1727 flag = arm64_zerobit(state, res) 1728 elif concrete_op in (ARM64G_CC_OP_LOGIC32, ARM64G_CC_OP_LOGIC64): 1729 flag = arm64_zerobit(state, cc_dep1) 1730 1731 if flag is not None: 1732 if len(flag) == 32: 1733 flag = flag.zero_extend(32) 1734 return flag 1735 1736 l.error("Unknown cc_op %s (arm64g_calculate_flag_z)", concrete_op) 1737 raise SimCCallError("Unknown cc_op %s" % concrete_op) 1738 1739def arm64g_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3): 1740 concrete_op = op_concretize(cc_op) 1741 flag = None 1742 1743 cc_dep1, cc_dep2, cc_dep3 = arm64g_32bit_truncate_operands(concrete_op, cc_dep1, cc_dep2, cc_dep3) 1744 1745 if concrete_op == ARM64G_CC_OP_COPY: 1746 flag = claripy.LShR(cc_dep1, ARM64G_CC_SHIFT_C) & 1 1747 elif concrete_op in (ARM64G_CC_OP_ADD32, ARM64G_CC_OP_ADD64): 1748 res = cc_dep1 + cc_dep2 1749 flag = boolean_extend(claripy.ULT, res, cc_dep1, 64) 1750 elif concrete_op in (ARM64G_CC_OP_SUB32, ARM64G_CC_OP_SUB64): 1751 flag = boolean_extend(claripy.UGE, cc_dep1, cc_dep2, 64) 1752 elif concrete_op in (ARM64G_CC_OP_ADC32, ARM64G_CC_OP_ADC64): 1753 res = cc_dep1 + 
cc_dep2 + cc_dep3 1754 flag = claripy.If(cc_dep2 != 0, boolean_extend(claripy.ULE, res, cc_dep1, 64), 1755 boolean_extend(claripy.ULT, res, cc_dep1, 64)) 1756 elif concrete_op in (ARM64G_CC_OP_SBC32, ARM64G_CC_OP_SBC64): 1757 flag = claripy.If(cc_dep2 != 0, boolean_extend(claripy.UGE, cc_dep1, cc_dep2, 64), 1758 boolean_extend(claripy.UGT, cc_dep1, cc_dep2, 64)) 1759 elif concrete_op in (ARM64G_CC_OP_LOGIC32, ARM64G_CC_OP_LOGIC64): 1760 flag = claripy.BVV(0, 64) # C after logic is zero on arm64 1761 1762 if flag is not None: return flag 1763 1764 l.error("Unknown cc_op %s (arm64g_calculate_flag_c)", cc_op) 1765 raise SimCCallError("Unknown cc_op %s" % cc_op) 1766 1767def arm64g_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3): 1768 concrete_op = op_concretize(cc_op) 1769 flag = None 1770 1771 cc_dep1, cc_dep2, cc_dep3 = arm64g_32bit_truncate_operands(concrete_op, cc_dep1, cc_dep2, cc_dep3) 1772 1773 if concrete_op == ARM64G_CC_OP_COPY: 1774 flag = claripy.LShR(cc_dep1, ARM64G_CC_SHIFT_V) & 1 1775 elif concrete_op == ARM64G_CC_OP_ADD32: 1776 cc_dep1 = cc_dep1[31:0] 1777 cc_dep2 = cc_dep2[31:0] 1778 res = cc_dep1 + cc_dep2 1779 v = ((res ^ cc_dep1) & (res ^ cc_dep2)) 1780 flag = claripy.LShR(v, 31).zero_extend(32) 1781 elif concrete_op == ARM64G_CC_OP_ADD64: 1782 res = cc_dep1 + cc_dep2 1783 v = ((res ^ cc_dep1) & (res ^ cc_dep2)) 1784 flag = claripy.LShR(v, 63) 1785 elif concrete_op == ARM64G_CC_OP_SUB32: 1786 cc_dep1 = cc_dep1[31:0] 1787 cc_dep2 = cc_dep2[31:0] 1788 res = cc_dep1 - cc_dep2 1789 v = ((cc_dep1 ^ cc_dep2) & (cc_dep1 ^ res)) 1790 flag = claripy.LShR(v, 31).zero_extend(32) 1791 elif concrete_op == ARM64G_CC_OP_SUB64: 1792 res = cc_dep1 - cc_dep2 1793 v = ((cc_dep1 ^ cc_dep2) & (cc_dep1 ^ res)) 1794 flag = claripy.LShR(v, 63) 1795 elif concrete_op == ARM64G_CC_OP_ADC32: 1796 cc_dep1 = cc_dep1[31:0] 1797 cc_dep2 = cc_dep2[31:0] 1798 res = cc_dep1 + cc_dep2 + cc_dep3 1799 v = ((res ^ cc_dep1) & (res ^ cc_dep2)) 1800 flag = claripy.LShR(v, 
31).zero_extend(32) 1801 elif concrete_op == ARM64G_CC_OP_ADC64: 1802 res = cc_dep1 + cc_dep2 + cc_dep3 1803 v = ((res ^ cc_dep1) & (res ^ cc_dep2)) 1804 flag = claripy.LShR(v, 63) 1805 elif concrete_op == ARM64G_CC_OP_SBC32: 1806 cc_dep1 = cc_dep1[31:0] 1807 cc_dep2 = cc_dep2[31:0] 1808 res = cc_dep1 - cc_dep2 - (cc_dep3^1) 1809 v = ((cc_dep1 ^ cc_dep2) & (cc_dep1 ^ res)) 1810 flag = claripy.LShR(v, 31).zero_extend(32) 1811 elif concrete_op == ARM64G_CC_OP_SBC64: 1812 res = cc_dep1 - cc_dep2 - (cc_dep3^1) 1813 v = ((cc_dep1 ^ cc_dep2) & (cc_dep1 ^ res)) 1814 flag = claripy.LShR(v, 63) 1815 elif concrete_op in (ARM64G_CC_OP_LOGIC32, ARM64G_CC_OP_LOGIC64): 1816 flag = claripy.BVV(0, 64) 1817 1818 if flag is not None: return flag 1819 1820 l.error("Unknown cc_op %s (arm64g_calculate_flag_v)", cc_op) 1821 raise SimCCallError("Unknown cc_op %s" % cc_op) 1822 1823def arm64g_calculate_data_nzcv(state, cc_op, cc_dep1, cc_dep2, cc_dep3): 1824 # NOTE: adding constraints afterwards works here *only* because the constraints are actually useless, because we require 1825 # cc_op to be unique. If we didn't, we'd need to pass the constraints into any functions called after the constraints were 1826 # created. 
1827 n = arm64g_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1828 z = arm64g_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1829 c = arm64g_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1830 v = arm64g_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1831 vec = [(ARM64G_CC_SHIFT_N, claripy.Extract(0, 0, n)), 1832 (ARM64G_CC_SHIFT_Z, claripy.Extract(0, 0, z)), 1833 (ARM64G_CC_SHIFT_C, claripy.Extract(0, 0, c)), 1834 (ARM64G_CC_SHIFT_V, claripy.Extract(0, 0, v))] 1835 return _concat_flags(ARM64G_NBITS, vec) 1836 1837def arm64g_calculate_condition(state, cond_n_op, cc_dep1, cc_dep2, cc_dep3): 1838 concretize_cond_n_op = op_concretize(cond_n_op) 1839 cond = concretize_cond_n_op >> 4 1840 cc_op = concretize_cond_n_op & 0xF 1841 inv = cond & 1 1842 1843 concrete_cond = cond 1844 flag = None 1845 1846 if concrete_cond in (ARM64CondAL, ARM64CondNV): 1847 flag = claripy.BVV(1, 64) 1848 elif concrete_cond in (ARM64CondEQ, ARM64CondNE): 1849 zf = arm64g_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1850 flag = inv ^ zf 1851 elif concrete_cond in (ARM64CondCS, ARM64CondCC): 1852 cf = arm64g_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1853 flag = inv ^ cf 1854 elif concrete_cond in (ARM64CondMI, ARM64CondPL): 1855 nf = arm64g_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1856 flag = inv ^ nf 1857 elif concrete_cond in (ARM64CondVS, ARM64CondVC): 1858 vf = arm64g_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1859 flag = inv ^ vf 1860 elif concrete_cond in (ARM64CondHI, ARM64CondLS): 1861 cf = arm64g_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1862 zf = arm64g_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1863 flag = inv ^ (1 & (cf & ~zf)) 1864 elif concrete_cond in (ARM64CondGE, ARM64CondLT): 1865 nf = arm64g_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1866 vf = arm64g_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1867 flag = inv ^ (1 
& ~(nf ^ vf)) 1868 elif concrete_cond in (ARM64CondGT, ARM64CondLE): 1869 nf = arm64g_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1870 vf = arm64g_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1871 zf = arm64g_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3) 1872 flag = inv ^ (1 & ~(zf | (nf ^ vf))) 1873 1874 if flag is not None: return flag 1875 1876 l.error("Unrecognized condition %d in arm64g_calculate_condition", concrete_cond) 1877 raise SimCCallError("Unrecognized condition %d in arm64g_calculate_condition" % concrete_cond) 1878 1879# 1880# Some helpers 1881# 1882 1883def _get_flags(state) -> claripy.ast.bv.BV: 1884 cc_op = strip_simaction(state.regs.cc_op) 1885 cc_dep1 = strip_simaction(state.regs.cc_dep1) 1886 cc_dep2 = strip_simaction(state.regs.cc_dep2) 1887 cc_ndep = strip_simaction(state.regs.cc_ndep) 1888 if state.arch.name == 'X86': 1889 func = x86g_calculate_eflags_all 1890 elif state.arch.name == 'AMD64': 1891 func = amd64g_calculate_rflags_all 1892 elif is_arm_arch(state.arch): 1893 func = armg_calculate_flags_nzcv 1894 elif state.arch.name == 'AARCH64': 1895 func = arm64g_calculate_data_nzcv 1896 else: 1897 l.warning("No such thing as a flags register for arch %s", state.arch.name) 1898 return None 1899 try: 1900 return func(state, cc_op, cc_dep1, cc_dep2, cc_ndep) 1901 except CCallMultivaluedException as e: 1902 cases = e.args[0] 1903 return claripy.ite_cases([(case, func(state, value, cc_dep1, cc_dep2, cc_ndep)) for case, value in cases], 0) 1904 1905def _concat_flags(nbits, flags_vec): 1906 """ 1907 Concatenate different flag BVs to a single BV. Currently used for ARM, X86 1908 and AMD64. 1909 :param nbits : platform size in bits. 1910 :param flags_vec: vector of flag BVs and their offset in the resulting BV. 1911 1912 :type nbits : int 1913 :type flags_vec : list 1914 1915 :return : the resulting flag BV. 
1916 :rtype : claripy.BVV 1917 """ 1918 1919 result = claripy.BVV(0, 0) 1920 for offset, bit in flags_vec: 1921 current_position = nbits - 1 - result.length 1922 result = result.concat(claripy.BVV(0, current_position - offset), bit) 1923 result = result.concat(claripy.BVV(0, nbits - result.length)) 1924 return result 1925 1926def _get_nbits(cc_str): 1927 nbits = None 1928 if cc_str.endswith('B'): 1929 nbits = 8 1930 elif cc_str.endswith('W'): 1931 nbits = 16 1932 elif cc_str.endswith('L'): 1933 nbits = 32 1934 elif cc_str.endswith('Q'): 1935 nbits = 64 1936 elif cc_str.endswith('32'): 1937 nbits = 32 1938 elif cc_str.endswith('64'): 1939 nbits = 64 1940 return nbits 1941 1942from angr.errors import SimError, SimCCallError 1943from angr.sim_options import USE_SIMPLIFIED_CCALLS 1944