1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de> 5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de> 6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net> 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a copy 9 * of this software and associated documentation files (the "Software"), to deal 10 * in the Software without restriction, including without limitation the rights 11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 * copies of the Software, and to permit persons to whom the Software is 13 * furnished to do so, subject to the following conditions: 14 * 15 * The above copyright notice and this permission notice shall be included in 16 * all copies or substantial portions of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 24 * THE SOFTWARE. 25 */ 26 27/* We only support generating code for 64-bit mode. */ 28#if TCG_TARGET_REG_BITS != 64 29#error "unsupported code generation mode" 30#endif 31 32#include "../tcg-pool.c.inc" 33#include "elf.h" 34 35/* ??? The translation blocks produced by TCG are generally small enough to 36 be entirely reachable with a 16-bit displacement. Leaving the option for 37 a 32-bit displacement here Just In Case. */ 38#define USE_LONG_BRANCHES 0 39 40#define TCG_CT_CONST_S16 0x100 41#define TCG_CT_CONST_S32 0x200 42#define TCG_CT_CONST_S33 0x400 43#define TCG_CT_CONST_ZERO 0x800 44 45/* Several places within the instruction set 0 means "no register" 46 rather than TCG_REG_R0. */ 47#define TCG_REG_NONE 0 48 49/* A scratch register that may be be used throughout the backend. */ 50#define TCG_TMP0 TCG_REG_R1 51 52/* A scratch register that holds a pointer to the beginning of the TB. 53 We don't need this when we have pc-relative loads with the general 54 instructions extension facility. */ 55#define TCG_REG_TB TCG_REG_R12 56#define USE_REG_TB (!(s390_facilities & FACILITY_GEN_INST_EXT)) 57 58#ifndef CONFIG_SOFTMMU 59#define TCG_GUEST_BASE_REG TCG_REG_R13 60#endif 61 62/* All of the following instructions are prefixed with their instruction 63 format, and are defined as 8- or 16-bit quantities, even when the two 64 halves of the 16-bit quantity may appear 32 bits apart in the insn. 65 This makes it easy to copy the values from the tables in Appendix B. */ 66typedef enum S390Opcode { 67 RIL_AFI = 0xc209, 68 RIL_AGFI = 0xc208, 69 RIL_ALFI = 0xc20b, 70 RIL_ALGFI = 0xc20a, 71 RIL_BRASL = 0xc005, 72 RIL_BRCL = 0xc004, 73 RIL_CFI = 0xc20d, 74 RIL_CGFI = 0xc20c, 75 RIL_CLFI = 0xc20f, 76 RIL_CLGFI = 0xc20e, 77 RIL_CLRL = 0xc60f, 78 RIL_CLGRL = 0xc60a, 79 RIL_CRL = 0xc60d, 80 RIL_CGRL = 0xc608, 81 RIL_IIHF = 0xc008, 82 RIL_IILF = 0xc009, 83 RIL_LARL = 0xc000, 84 RIL_LGFI = 0xc001, 85 RIL_LGRL = 0xc408, 86 RIL_LLIHF = 0xc00e, 87 RIL_LLILF = 0xc00f, 88 RIL_LRL = 0xc40d, 89 RIL_MSFI = 0xc201, 90 RIL_MSGFI = 0xc200, 91 RIL_NIHF = 0xc00a, 92 RIL_NILF = 0xc00b, 93 RIL_OIHF = 0xc00c, 94 RIL_OILF = 0xc00d, 95 RIL_SLFI = 0xc205, 96 RIL_SLGFI = 0xc204, 97 RIL_XIHF = 0xc006, 98 RIL_XILF = 0xc007, 99 100 RI_AGHI = 0xa70b, 101 RI_AHI = 0xa70a, 102 RI_BRC = 0xa704, 103 RI_CHI = 0xa70e, 104 RI_CGHI = 0xa70f, 105 RI_IIHH = 0xa500, 106 RI_IIHL = 0xa501, 107 RI_IILH = 0xa502, 108 RI_IILL = 0xa503, 109 RI_LGHI = 0xa709, 110 RI_LLIHH = 0xa50c, 111 RI_LLIHL = 0xa50d, 112 RI_LLILH = 0xa50e, 113 RI_LLILL = 0xa50f, 114 RI_MGHI = 0xa70d, 115 RI_MHI = 0xa70c, 116 RI_NIHH = 0xa504, 117 RI_NIHL = 0xa505, 118 RI_NILH = 0xa506, 119 RI_NILL = 0xa507, 120 RI_OIHH = 0xa508, 121 RI_OIHL = 0xa509, 122 RI_OILH = 0xa50a, 123 RI_OILL = 0xa50b, 124 125 RIE_CGIJ = 0xec7c, 126 RIE_CGRJ = 0xec64, 127 RIE_CIJ = 0xec7e, 128 RIE_CLGRJ = 0xec65, 129 RIE_CLIJ = 0xec7f, 130 RIE_CLGIJ = 0xec7d, 131 RIE_CLRJ = 0xec77, 132 RIE_CRJ = 0xec76, 133 RIE_LOCGHI = 0xec46, 134 RIE_RISBG = 0xec55, 135 136 RRE_AGR = 0xb908, 137 RRE_ALGR = 0xb90a, 138 RRE_ALCR = 0xb998, 139 RRE_ALCGR = 0xb988, 140 RRE_CGR = 0xb920, 141 RRE_CLGR = 0xb921, 142 RRE_DLGR = 0xb987, 143 RRE_DLR = 0xb997, 144 RRE_DSGFR = 0xb91d, 145 RRE_DSGR = 0xb90d, 146 RRE_FLOGR = 0xb983, 147 RRE_LGBR = 0xb906, 148 RRE_LCGR = 0xb903, 149 RRE_LGFR = 0xb914, 150 RRE_LGHR = 0xb907, 151 RRE_LGR = 0xb904, 152 RRE_LLGCR = 0xb984, 153 RRE_LLGFR = 0xb916, 154 RRE_LLGHR = 0xb985, 155 RRE_LRVR = 0xb91f, 156 RRE_LRVGR = 0xb90f, 157 RRE_LTGR = 0xb902, 158 RRE_MLGR = 0xb986, 159 RRE_MSGR = 0xb90c, 160 RRE_MSR = 0xb252, 161 RRE_NGR = 0xb980, 162 RRE_OGR = 0xb981, 163 RRE_SGR = 0xb909, 164 RRE_SLGR = 0xb90b, 165 RRE_SLBR = 0xb999, 166 RRE_SLBGR = 0xb989, 167 RRE_XGR = 0xb982, 168 169 RRF_LOCR = 0xb9f2, 170 RRF_LOCGR = 0xb9e2, 171 RRF_NRK = 0xb9f4, 172 RRF_NGRK = 0xb9e4, 173 RRF_ORK = 0xb9f6, 174 RRF_OGRK = 0xb9e6, 175 RRF_SRK = 0xb9f9, 176 RRF_SGRK = 0xb9e9, 177 RRF_SLRK = 0xb9fb, 178 RRF_SLGRK = 0xb9eb, 179 RRF_XRK = 0xb9f7, 180 RRF_XGRK = 0xb9e7, 181 182 RR_AR = 0x1a, 183 RR_ALR = 0x1e, 184 RR_BASR = 0x0d, 185 RR_BCR = 0x07, 186 RR_CLR = 0x15, 187 RR_CR = 0x19, 188 RR_DR = 0x1d, 189 RR_LCR = 0x13, 190 RR_LR = 0x18, 191 RR_LTR = 0x12, 192 RR_NR = 0x14, 193 RR_OR = 0x16, 194 RR_SR = 0x1b, 195 RR_SLR = 0x1f, 196 RR_XR = 0x17, 197 198 RSY_RLL = 0xeb1d, 199 RSY_RLLG = 0xeb1c, 200 RSY_SLLG = 0xeb0d, 201 RSY_SLLK = 0xebdf, 202 RSY_SRAG = 0xeb0a, 203 RSY_SRAK = 0xebdc, 204 RSY_SRLG = 0xeb0c, 205 RSY_SRLK = 0xebde, 206 207 RS_SLL = 0x89, 208 RS_SRA = 0x8a, 209 RS_SRL = 0x88, 210 211 RXY_AG = 0xe308, 212 RXY_AY = 0xe35a, 213 RXY_CG = 0xe320, 214 RXY_CLG = 0xe321, 215 RXY_CLY = 0xe355, 216 RXY_CY = 0xe359, 217 RXY_LAY = 0xe371, 218 RXY_LB = 0xe376, 219 RXY_LG = 0xe304, 220 RXY_LGB = 0xe377, 221 RXY_LGF = 0xe314, 222 RXY_LGH = 0xe315, 223 RXY_LHY = 0xe378, 224 RXY_LLGC = 0xe390, 225 RXY_LLGF = 0xe316, 226 RXY_LLGH = 0xe391, 227 RXY_LMG = 0xeb04, 228 RXY_LRV = 0xe31e, 229 RXY_LRVG = 0xe30f, 230 RXY_LRVH = 0xe31f, 231 RXY_LY = 0xe358, 232 RXY_NG = 0xe380, 233 RXY_OG = 0xe381, 234 RXY_STCY = 0xe372, 235 RXY_STG = 0xe324, 236 RXY_STHY = 0xe370, 237 RXY_STMG = 0xeb24, 238 RXY_STRV = 0xe33e, 239 RXY_STRVG = 0xe32f, 240 RXY_STRVH = 0xe33f, 241 RXY_STY = 0xe350, 242 RXY_XG = 0xe382, 243 244 RX_A = 0x5a, 245 RX_C = 0x59, 246 RX_L = 0x58, 247 RX_LA = 0x41, 248 RX_LH = 0x48, 249 RX_ST = 0x50, 250 RX_STC = 0x42, 251 RX_STH = 0x40, 252 253 NOP = 0x0707, 254} S390Opcode; 255 256#ifdef CONFIG_DEBUG_TCG 257static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 258 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", 259 "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15" 260}; 261#endif 262 263/* Since R6 is a potential argument register, choose it last of the 264 call-saved registers. Likewise prefer the call-clobbered registers 265 in reverse order to maximize the chance of avoiding the arguments. */ 266static const int tcg_target_reg_alloc_order[] = { 267 /* Call saved registers. */ 268 TCG_REG_R13, 269 TCG_REG_R12, 270 TCG_REG_R11, 271 TCG_REG_R10, 272 TCG_REG_R9, 273 TCG_REG_R8, 274 TCG_REG_R7, 275 TCG_REG_R6, 276 /* Call clobbered registers. */ 277 TCG_REG_R14, 278 TCG_REG_R0, 279 TCG_REG_R1, 280 /* Argument registers, in reverse order of allocation. */ 281 TCG_REG_R5, 282 TCG_REG_R4, 283 TCG_REG_R3, 284 TCG_REG_R2, 285}; 286 287static const int tcg_target_call_iarg_regs[] = { 288 TCG_REG_R2, 289 TCG_REG_R3, 290 TCG_REG_R4, 291 TCG_REG_R5, 292 TCG_REG_R6, 293}; 294 295static const int tcg_target_call_oarg_regs[] = { 296 TCG_REG_R2, 297}; 298 299#define S390_CC_EQ 8 300#define S390_CC_LT 4 301#define S390_CC_GT 2 302#define S390_CC_OV 1 303#define S390_CC_NE (S390_CC_LT | S390_CC_GT) 304#define S390_CC_LE (S390_CC_LT | S390_CC_EQ) 305#define S390_CC_GE (S390_CC_GT | S390_CC_EQ) 306#define S390_CC_NEVER 0 307#define S390_CC_ALWAYS 15 308 309/* Condition codes that result from a COMPARE and COMPARE LOGICAL. */ 310static const uint8_t tcg_cond_to_s390_cond[] = { 311 [TCG_COND_EQ] = S390_CC_EQ, 312 [TCG_COND_NE] = S390_CC_NE, 313 [TCG_COND_LT] = S390_CC_LT, 314 [TCG_COND_LE] = S390_CC_LE, 315 [TCG_COND_GT] = S390_CC_GT, 316 [TCG_COND_GE] = S390_CC_GE, 317 [TCG_COND_LTU] = S390_CC_LT, 318 [TCG_COND_LEU] = S390_CC_LE, 319 [TCG_COND_GTU] = S390_CC_GT, 320 [TCG_COND_GEU] = S390_CC_GE, 321}; 322 323/* Condition codes that result from a LOAD AND TEST. Here, we have no 324 unsigned instruction variation, however since the test is vs zero we 325 can re-map the outcomes appropriately. */ 326static const uint8_t tcg_cond_to_ltr_cond[] = { 327 [TCG_COND_EQ] = S390_CC_EQ, 328 [TCG_COND_NE] = S390_CC_NE, 329 [TCG_COND_LT] = S390_CC_LT, 330 [TCG_COND_LE] = S390_CC_LE, 331 [TCG_COND_GT] = S390_CC_GT, 332 [TCG_COND_GE] = S390_CC_GE, 333 [TCG_COND_LTU] = S390_CC_NEVER, 334 [TCG_COND_LEU] = S390_CC_EQ, 335 [TCG_COND_GTU] = S390_CC_NE, 336 [TCG_COND_GEU] = S390_CC_ALWAYS, 337}; 338 339#ifdef CONFIG_SOFTMMU 340static void * const qemu_ld_helpers[16] = { 341 [MO_UB] = helper_ret_ldub_mmu, 342 [MO_SB] = helper_ret_ldsb_mmu, 343 [MO_LEUW] = helper_le_lduw_mmu, 344 [MO_LESW] = helper_le_ldsw_mmu, 345 [MO_LEUL] = helper_le_ldul_mmu, 346 [MO_LESL] = helper_le_ldsl_mmu, 347 [MO_LEQ] = helper_le_ldq_mmu, 348 [MO_BEUW] = helper_be_lduw_mmu, 349 [MO_BESW] = helper_be_ldsw_mmu, 350 [MO_BEUL] = helper_be_ldul_mmu, 351 [MO_BESL] = helper_be_ldsl_mmu, 352 [MO_BEQ] = helper_be_ldq_mmu, 353}; 354 355static void * const qemu_st_helpers[16] = { 356 [MO_UB] = helper_ret_stb_mmu, 357 [MO_LEUW] = helper_le_stw_mmu, 358 [MO_LEUL] = helper_le_stl_mmu, 359 [MO_LEQ] = helper_le_stq_mmu, 360 [MO_BEUW] = helper_be_stw_mmu, 361 [MO_BEUL] = helper_be_stl_mmu, 362 [MO_BEQ] = helper_be_stq_mmu, 363}; 364#endif 365 366static tcg_insn_unit *tb_ret_addr; 367uint64_t s390_facilities; 368 369static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 370 intptr_t value, intptr_t addend) 371{ 372 intptr_t pcrel2; 373 uint32_t old; 374 375 value += addend; 376 pcrel2 = (tcg_insn_unit *)value - code_ptr; 377 378 switch (type) { 379 case R_390_PC16DBL: 380 if (pcrel2 == (int16_t)pcrel2) { 381 tcg_patch16(code_ptr, pcrel2); 382 return true; 383 } 384 break; 385 case R_390_PC32DBL: 386 if (pcrel2 == (int32_t)pcrel2) { 387 tcg_patch32(code_ptr, pcrel2); 388 return true; 389 } 390 break; 391 case R_390_20: 392 if (value == sextract64(value, 0, 20)) { 393 old = *(uint32_t *)code_ptr & 0xf00000ff; 394 old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4); 395 tcg_patch32(code_ptr, old); 396 return true; 397 } 398 break; 399 default: 400 g_assert_not_reached(); 401 } 402 return false; 403} 404 405/* parse target specific constraints */ 406static const char *target_parse_constraint(TCGArgConstraint *ct, 407 const char *ct_str, TCGType type) 408{ 409 switch (*ct_str++) { 410 case 'r': /* all registers */ 411 ct->regs = 0xffff; 412 break; 413 case 'L': /* qemu_ld/st constraint */ 414 ct->regs = 0xffff; 415 tcg_regset_reset_reg(ct->regs, TCG_REG_R2); 416 tcg_regset_reset_reg(ct->regs, TCG_REG_R3); 417 tcg_regset_reset_reg(ct->regs, TCG_REG_R4); 418 break; 419 case 'a': /* force R2 for division */ 420 ct->regs = 0; 421 tcg_regset_set_reg(ct->regs, TCG_REG_R2); 422 break; 423 case 'b': /* force R3 for division */ 424 ct->regs = 0; 425 tcg_regset_set_reg(ct->regs, TCG_REG_R3); 426 break; 427 case 'A': 428 ct->ct |= TCG_CT_CONST_S33; 429 break; 430 case 'I': 431 ct->ct |= TCG_CT_CONST_S16; 432 break; 433 case 'J': 434 ct->ct |= TCG_CT_CONST_S32; 435 break; 436 case 'Z': 437 ct->ct |= TCG_CT_CONST_ZERO; 438 break; 439 default: 440 return NULL; 441 } 442 return ct_str; 443} 444 445/* Test if a constant matches the constraint. */ 446static int tcg_target_const_match(tcg_target_long val, TCGType type, 447 const TCGArgConstraint *arg_ct) 448{ 449 int ct = arg_ct->ct; 450 451 if (ct & TCG_CT_CONST) { 452 return 1; 453 } 454 455 if (type == TCG_TYPE_I32) { 456 val = (int32_t)val; 457 } 458 459 /* The following are mutually exclusive. */ 460 if (ct & TCG_CT_CONST_S16) { 461 return val == (int16_t)val; 462 } else if (ct & TCG_CT_CONST_S32) { 463 return val == (int32_t)val; 464 } else if (ct & TCG_CT_CONST_S33) { 465 return val >= -0xffffffffll && val <= 0xffffffffll; 466 } else if (ct & TCG_CT_CONST_ZERO) { 467 return val == 0; 468 } 469 470 return 0; 471} 472 473/* Emit instructions according to the given instruction format. */ 474 475static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2) 476{ 477 tcg_out16(s, (op << 8) | (r1 << 4) | r2); 478} 479 480static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op, 481 TCGReg r1, TCGReg r2) 482{ 483 tcg_out32(s, (op << 16) | (r1 << 4) | r2); 484} 485 486static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op, 487 TCGReg r1, TCGReg r2, int m3) 488{ 489 tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2); 490} 491 492static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2) 493{ 494 tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff)); 495} 496 497static void tcg_out_insn_RIE(TCGContext *s, S390Opcode op, TCGReg r1, 498 int i2, int m3) 499{ 500 tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3); 501 tcg_out32(s, (i2 << 16) | (op & 0xff)); 502} 503 504static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2) 505{ 506 tcg_out16(s, op | (r1 << 4)); 507 tcg_out32(s, i2); 508} 509 510static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1, 511 TCGReg b2, TCGReg r3, int disp) 512{ 513 tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12) 514 | (disp & 0xfff)); 515} 516 517static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1, 518 TCGReg b2, TCGReg r3, int disp) 519{ 520 tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3); 521 tcg_out32(s, (op & 0xff) | (b2 << 28) 522 | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4)); 523} 524 525#define tcg_out_insn_RX tcg_out_insn_RS 526#define tcg_out_insn_RXY tcg_out_insn_RSY 527 528/* Emit an opcode with "type-checking" of the format. */ 529#define tcg_out_insn(S, FMT, OP, ...) \ 530 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__) 531 532 533/* emit 64-bit shifts */ 534static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest, 535 TCGReg src, TCGReg sh_reg, int sh_imm) 536{ 537 tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm); 538} 539 540/* emit 32-bit shifts */ 541static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest, 542 TCGReg sh_reg, int sh_imm) 543{ 544 tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm); 545} 546 547static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src) 548{ 549 if (src != dst) { 550 if (type == TCG_TYPE_I32) { 551 tcg_out_insn(s, RR, LR, dst, src); 552 } else { 553 tcg_out_insn(s, RRE, LGR, dst, src); 554 } 555 } 556 return true; 557} 558 559static const S390Opcode lli_insns[4] = { 560 RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH 561}; 562 563static bool maybe_out_small_movi(TCGContext *s, TCGType type, 564 TCGReg ret, tcg_target_long sval) 565{ 566 tcg_target_ulong uval = sval; 567 int i; 568 569 if (type == TCG_TYPE_I32) { 570 uval = (uint32_t)sval; 571 sval = (int32_t)sval; 572 } 573 574 /* Try all 32-bit insns that can load it in one go. */ 575 if (sval >= -0x8000 && sval < 0x8000) { 576 tcg_out_insn(s, RI, LGHI, ret, sval); 577 return true; 578 } 579 580 for (i = 0; i < 4; i++) { 581 tcg_target_long mask = 0xffffull << i*16; 582 if ((uval & mask) == uval) { 583 tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16); 584 return true; 585 } 586 } 587 588 return false; 589} 590 591/* load a register with an immediate value */ 592static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, 593 tcg_target_long sval, bool in_prologue) 594{ 595 tcg_target_ulong uval; 596 597 /* Try all 32-bit insns that can load it in one go. */ 598 if (maybe_out_small_movi(s, type, ret, sval)) { 599 return; 600 } 601 602 uval = sval; 603 if (type == TCG_TYPE_I32) { 604 uval = (uint32_t)sval; 605 sval = (int32_t)sval; 606 } 607 608 /* Try all 48-bit insns that can load it in one go. */ 609 if (s390_facilities & FACILITY_EXT_IMM) { 610 if (sval == (int32_t)sval) { 611 tcg_out_insn(s, RIL, LGFI, ret, sval); 612 return; 613 } 614 if (uval <= 0xffffffff) { 615 tcg_out_insn(s, RIL, LLILF, ret, uval); 616 return; 617 } 618 if ((uval & 0xffffffff) == 0) { 619 tcg_out_insn(s, RIL, LLIHF, ret, uval >> 32); 620 return; 621 } 622 } 623 624 /* Try for PC-relative address load. For odd addresses, 625 attempt to use an offset from the start of the TB. */ 626 if ((sval & 1) == 0) { 627 ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1; 628 if (off == (int32_t)off) { 629 tcg_out_insn(s, RIL, LARL, ret, off); 630 return; 631 } 632 } else if (USE_REG_TB && !in_prologue) { 633 ptrdiff_t off = sval - (uintptr_t)s->code_gen_ptr; 634 if (off == sextract64(off, 0, 20)) { 635 /* This is certain to be an address within TB, and therefore 636 OFF will be negative; don't try RX_LA. */ 637 tcg_out_insn(s, RXY, LAY, ret, TCG_REG_TB, TCG_REG_NONE, off); 638 return; 639 } 640 } 641 642 /* A 32-bit unsigned value can be loaded in 2 insns. And given 643 that LLILL, LLIHL, LLILF above did not succeed, we know that 644 both insns are required. */ 645 if (uval <= 0xffffffff) { 646 tcg_out_insn(s, RI, LLILL, ret, uval); 647 tcg_out_insn(s, RI, IILH, ret, uval >> 16); 648 return; 649 } 650 651 /* Otherwise, stuff it in the constant pool. */ 652 if (s390_facilities & FACILITY_GEN_INST_EXT) { 653 tcg_out_insn(s, RIL, LGRL, ret, 0); 654 new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2); 655 } else if (USE_REG_TB && !in_prologue) { 656 tcg_out_insn(s, RXY, LG, ret, TCG_REG_TB, TCG_REG_NONE, 0); 657 new_pool_label(s, sval, R_390_20, s->code_ptr - 2, 658 -(intptr_t)s->code_gen_ptr); 659 } else { 660 TCGReg base = ret ? ret : TCG_TMP0; 661 tcg_out_insn(s, RIL, LARL, base, 0); 662 new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2); 663 tcg_out_insn(s, RXY, LG, ret, base, TCG_REG_NONE, 0); 664 } 665} 666 667static void tcg_out_movi(TCGContext *s, TCGType type, 668 TCGReg ret, tcg_target_long sval) 669{ 670 tcg_out_movi_int(s, type, ret, sval, false); 671} 672 673/* Emit a load/store type instruction. Inputs are: 674 DATA: The register to be loaded or stored. 675 BASE+OFS: The effective address. 676 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0. 677 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */ 678 679static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy, 680 TCGReg data, TCGReg base, TCGReg index, 681 tcg_target_long ofs) 682{ 683 if (ofs < -0x80000 || ofs >= 0x80000) { 684 /* Combine the low 20 bits of the offset with the actual load insn; 685 the high 44 bits must come from an immediate load. */ 686 tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000; 687 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low); 688 ofs = low; 689 690 /* If we were already given an index register, add it in. */ 691 if (index != TCG_REG_NONE) { 692 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index); 693 } 694 index = TCG_TMP0; 695 } 696 697 if (opc_rx && ofs >= 0 && ofs < 0x1000) { 698 tcg_out_insn_RX(s, opc_rx, data, base, index, ofs); 699 } else { 700 tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs); 701 } 702} 703 704 705/* load data without address translation or endianness conversion */ 706static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data, 707 TCGReg base, intptr_t ofs) 708{ 709 if (type == TCG_TYPE_I32) { 710 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs); 711 } else { 712 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs); 713 } 714} 715 716static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data, 717 TCGReg base, intptr_t ofs) 718{ 719 if (type == TCG_TYPE_I32) { 720 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs); 721 } else { 722 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs); 723 } 724} 725 726static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 727 TCGReg base, intptr_t ofs) 728{ 729 return false; 730} 731 732/* load data from an absolute host address */ 733static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs) 734{ 735 intptr_t addr = (intptr_t)abs; 736 737 if ((s390_facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) { 738 ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1; 739 if (disp == (int32_t)disp) { 740 if (type == TCG_TYPE_I32) { 741 tcg_out_insn(s, RIL, LRL, dest, disp); 742 } else { 743 tcg_out_insn(s, RIL, LGRL, dest, disp); 744 } 745 return; 746 } 747 } 748 if (USE_REG_TB) { 749 ptrdiff_t disp = abs - (void *)s->code_gen_ptr; 750 if (disp == sextract64(disp, 0, 20)) { 751 tcg_out_ld(s, type, dest, TCG_REG_TB, disp); 752 return; 753 } 754 } 755 756 tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff); 757 tcg_out_ld(s, type, dest, dest, addr & 0xffff); 758} 759 760static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src, 761 int msb, int lsb, int ofs, int z) 762{ 763 /* Format RIE-f */ 764 tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src); 765 tcg_out16(s, (msb << 8) | (z << 7) | lsb); 766 tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff)); 767} 768 769static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) 770{ 771 if (s390_facilities & FACILITY_EXT_IMM) { 772 tcg_out_insn(s, RRE, LGBR, dest, src); 773 return; 774 } 775 776 if (type == TCG_TYPE_I32) { 777 if (dest == src) { 778 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24); 779 } else { 780 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24); 781 } 782 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24); 783 } else { 784 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56); 785 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56); 786 } 787} 788 789static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) 790{ 791 if (s390_facilities & FACILITY_EXT_IMM) { 792 tcg_out_insn(s, RRE, LLGCR, dest, src); 793 return; 794 } 795 796 if (dest == src) { 797 tcg_out_movi(s, type, TCG_TMP0, 0xff); 798 src = TCG_TMP0; 799 } else { 800 tcg_out_movi(s, type, dest, 0xff); 801 } 802 if (type == TCG_TYPE_I32) { 803 tcg_out_insn(s, RR, NR, dest, src); 804 } else { 805 tcg_out_insn(s, RRE, NGR, dest, src); 806 } 807} 808 809static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) 810{ 811 if (s390_facilities & FACILITY_EXT_IMM) { 812 tcg_out_insn(s, RRE, LGHR, dest, src); 813 return; 814 } 815 816 if (type == TCG_TYPE_I32) { 817 if (dest == src) { 818 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16); 819 } else { 820 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16); 821 } 822 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16); 823 } else { 824 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48); 825 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48); 826 } 827} 828 829static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) 830{ 831 if (s390_facilities & FACILITY_EXT_IMM) { 832 tcg_out_insn(s, RRE, LLGHR, dest, src); 833 return; 834 } 835 836 if (dest == src) { 837 tcg_out_movi(s, type, TCG_TMP0, 0xffff); 838 src = TCG_TMP0; 839 } else { 840 tcg_out_movi(s, type, dest, 0xffff); 841 } 842 if (type == TCG_TYPE_I32) { 843 tcg_out_insn(s, RR, NR, dest, src); 844 } else { 845 tcg_out_insn(s, RRE, NGR, dest, src); 846 } 847} 848 849static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src) 850{ 851 tcg_out_insn(s, RRE, LGFR, dest, src); 852} 853 854static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src) 855{ 856 tcg_out_insn(s, RRE, LLGFR, dest, src); 857} 858 859/* Accept bit patterns like these: 860 0....01....1 861 1....10....0 862 1..10..01..1 863 0..01..10..0 864 Copied from gcc sources. */ 865static inline bool risbg_mask(uint64_t c) 866{ 867 uint64_t lsb; 868 /* We don't change the number of transitions by inverting, 869 so make sure we start with the LSB zero. */ 870 if (c & 1) { 871 c = ~c; 872 } 873 /* Reject all zeros or all ones. */ 874 if (c == 0) { 875 return false; 876 } 877 /* Find the first transition. */ 878 lsb = c & -c; 879 /* Invert to look for a second transition. */ 880 c = ~c; 881 /* Erase the first transition. */ 882 c &= -lsb; 883 /* Find the second transition, if any. */ 884 lsb = c & -c; 885 /* Match if all the bits are 1's, or if c is zero. */ 886 return c == -lsb; 887} 888 889static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val) 890{ 891 int msb, lsb; 892 if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) { 893 /* Achieve wraparound by swapping msb and lsb. */ 894 msb = 64 - ctz64(~val); 895 lsb = clz64(~val) - 1; 896 } else { 897 msb = clz64(val); 898 lsb = 63 - ctz64(val); 899 } 900 tcg_out_risbg(s, out, in, msb, lsb, 0, 1); 901} 902 903static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) 904{ 905 static const S390Opcode ni_insns[4] = { 906 RI_NILL, RI_NILH, RI_NIHL, RI_NIHH 907 }; 908 static const S390Opcode nif_insns[2] = { 909 RIL_NILF, RIL_NIHF 910 }; 911 uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull); 912 int i; 913 914 /* Look for the zero-extensions. */ 915 if ((val & valid) == 0xffffffff) { 916 tgen_ext32u(s, dest, dest); 917 return; 918 } 919 if (s390_facilities & FACILITY_EXT_IMM) { 920 if ((val & valid) == 0xff) { 921 tgen_ext8u(s, TCG_TYPE_I64, dest, dest); 922 return; 923 } 924 if ((val & valid) == 0xffff) { 925 tgen_ext16u(s, TCG_TYPE_I64, dest, dest); 926 return; 927 } 928 } 929 930 /* Try all 32-bit insns that can perform it in one go. */ 931 for (i = 0; i < 4; i++) { 932 tcg_target_ulong mask = ~(0xffffull << i*16); 933 if (((val | ~valid) & mask) == mask) { 934 tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16); 935 return; 936 } 937 } 938 939 /* Try all 48-bit insns that can perform it in one go. */ 940 if (s390_facilities & FACILITY_EXT_IMM) { 941 for (i = 0; i < 2; i++) { 942 tcg_target_ulong mask = ~(0xffffffffull << i*32); 943 if (((val | ~valid) & mask) == mask) { 944 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32); 945 return; 946 } 947 } 948 } 949 if ((s390_facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) { 950 tgen_andi_risbg(s, dest, dest, val); 951 return; 952 } 953 954 /* Use the constant pool if USE_REG_TB, but not for small constants. */ 955 if (USE_REG_TB) { 956 if (!maybe_out_small_movi(s, type, TCG_TMP0, val)) { 957 tcg_out_insn(s, RXY, NG, dest, TCG_REG_TB, TCG_REG_NONE, 0); 958 new_pool_label(s, val & valid, R_390_20, s->code_ptr - 2, 959 -(intptr_t)s->code_gen_ptr); 960 return; 961 } 962 } else { 963 tcg_out_movi(s, type, TCG_TMP0, val); 964 } 965 if (type == TCG_TYPE_I32) { 966 tcg_out_insn(s, RR, NR, dest, TCG_TMP0); 967 } else { 968 tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0); 969 } 970} 971 972static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) 973{ 974 static const S390Opcode oi_insns[4] = { 975 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH 976 }; 977 static const S390Opcode oif_insns[2] = { 978 RIL_OILF, RIL_OIHF 979 }; 980 981 int i; 982 983 /* Look for no-op. */ 984 if (unlikely(val == 0)) { 985 return; 986 } 987 988 /* Try all 32-bit insns that can perform it in one go. */ 989 for (i = 0; i < 4; i++) { 990 tcg_target_ulong mask = (0xffffull << i*16); 991 if ((val & mask) != 0 && (val & ~mask) == 0) { 992 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16); 993 return; 994 } 995 } 996 997 /* Try all 48-bit insns that can perform it in one go. */ 998 if (s390_facilities & FACILITY_EXT_IMM) { 999 for (i = 0; i < 2; i++) { 1000 tcg_target_ulong mask = (0xffffffffull << i*32); 1001 if ((val & mask) != 0 && (val & ~mask) == 0) { 1002 tcg_out_insn_RIL(s, oif_insns[i], dest, val >> i*32); 1003 return; 1004 } 1005 } 1006 } 1007 1008 /* Use the constant pool if USE_REG_TB, but not for small constants. */ 1009 if (maybe_out_small_movi(s, type, TCG_TMP0, val)) { 1010 if (type == TCG_TYPE_I32) { 1011 tcg_out_insn(s, RR, OR, dest, TCG_TMP0); 1012 } else { 1013 tcg_out_insn(s, RRE, OGR, dest, TCG_TMP0); 1014 } 1015 } else if (USE_REG_TB) { 1016 tcg_out_insn(s, RXY, OG, dest, TCG_REG_TB, TCG_REG_NONE, 0); 1017 new_pool_label(s, val, R_390_20, s->code_ptr - 2, 1018 -(intptr_t)s->code_gen_ptr); 1019 } else { 1020 /* Perform the OR via sequential modifications to the high and 1021 low parts. Do this via recursion to handle 16-bit vs 32-bit 1022 masks in each half. */ 1023 tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM); 1024 tgen_ori(s, type, dest, val & 0x00000000ffffffffull); 1025 tgen_ori(s, type, dest, val & 0xffffffff00000000ull); 1026 } 1027} 1028 1029static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) 1030{ 1031 /* Try all 48-bit insns that can perform it in one go. */ 1032 if (s390_facilities & FACILITY_EXT_IMM) { 1033 if ((val & 0xffffffff00000000ull) == 0) { 1034 tcg_out_insn(s, RIL, XILF, dest, val); 1035 return; 1036 } 1037 if ((val & 0x00000000ffffffffull) == 0) { 1038 tcg_out_insn(s, RIL, XIHF, dest, val >> 32); 1039 return; 1040 } 1041 } 1042 1043 /* Use the constant pool if USE_REG_TB, but not for small constants. */ 1044 if (maybe_out_small_movi(s, type, TCG_TMP0, val)) { 1045 if (type == TCG_TYPE_I32) { 1046 tcg_out_insn(s, RR, XR, dest, TCG_TMP0); 1047 } else { 1048 tcg_out_insn(s, RRE, XGR, dest, TCG_TMP0); 1049 } 1050 } else if (USE_REG_TB) { 1051 tcg_out_insn(s, RXY, XG, dest, TCG_REG_TB, TCG_REG_NONE, 0); 1052 new_pool_label(s, val, R_390_20, s->code_ptr - 2, 1053 -(intptr_t)s->code_gen_ptr); 1054 } else { 1055 /* Perform the xor by parts. */ 1056 tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM); 1057 if (val & 0xffffffff) { 1058 tcg_out_insn(s, RIL, XILF, dest, val); 1059 } 1060 if (val > 0xffffffff) { 1061 tcg_out_insn(s, RIL, XIHF, dest, val >> 32); 1062 } 1063 } 1064} 1065 1066static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1, 1067 TCGArg c2, bool c2const, bool need_carry) 1068{ 1069 bool is_unsigned = is_unsigned_cond(c); 1070 S390Opcode op; 1071 1072 if (c2const) { 1073 if (c2 == 0) { 1074 if (!(is_unsigned && need_carry)) { 1075 if (type == TCG_TYPE_I32) { 1076 tcg_out_insn(s, RR, LTR, r1, r1); 1077 } else { 1078 tcg_out_insn(s, RRE, LTGR, r1, r1); 1079 } 1080 return tcg_cond_to_ltr_cond[c]; 1081 } 1082 } 1083 1084 if (!is_unsigned && c2 == (int16_t)c2) { 1085 op = (type == TCG_TYPE_I32 ? RI_CHI : RI_CGHI); 1086 tcg_out_insn_RI(s, op, r1, c2); 1087 goto exit; 1088 } 1089 1090 if (s390_facilities & FACILITY_EXT_IMM) { 1091 if (type == TCG_TYPE_I32) { 1092 op = (is_unsigned ? RIL_CLFI : RIL_CFI); 1093 tcg_out_insn_RIL(s, op, r1, c2); 1094 goto exit; 1095 } else if (c2 == (is_unsigned ? (uint32_t)c2 : (int32_t)c2)) { 1096 op = (is_unsigned ? RIL_CLGFI : RIL_CGFI); 1097 tcg_out_insn_RIL(s, op, r1, c2); 1098 goto exit; 1099 } 1100 } 1101 1102 /* Use the constant pool, but not for small constants. */ 1103 if (maybe_out_small_movi(s, type, TCG_TMP0, c2)) { 1104 c2 = TCG_TMP0; 1105 /* fall through to reg-reg */ 1106 } else if (USE_REG_TB) { 1107 if (type == TCG_TYPE_I32) { 1108 op = (is_unsigned ? RXY_CLY : RXY_CY); 1109 tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0); 1110 new_pool_label(s, (uint32_t)c2, R_390_20, s->code_ptr - 2, 1111 4 - (intptr_t)s->code_gen_ptr); 1112 } else { 1113 op = (is_unsigned ? RXY_CLG : RXY_CG); 1114 tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0); 1115 new_pool_label(s, c2, R_390_20, s->code_ptr - 2, 1116 -(intptr_t)s->code_gen_ptr); 1117 } 1118 goto exit; 1119 } else { 1120 if (type == TCG_TYPE_I32) { 1121 op = (is_unsigned ? RIL_CLRL : RIL_CRL); 1122 tcg_out_insn_RIL(s, op, r1, 0); 1123 new_pool_label(s, (uint32_t)c2, R_390_PC32DBL, 1124 s->code_ptr - 2, 2 + 4); 1125 } else { 1126 op = (is_unsigned ? RIL_CLGRL : RIL_CGRL); 1127 tcg_out_insn_RIL(s, op, r1, 0); 1128 new_pool_label(s, c2, R_390_PC32DBL, s->code_ptr - 2, 2); 1129 } 1130 goto exit; 1131 } 1132 } 1133 1134 if (type == TCG_TYPE_I32) { 1135 op = (is_unsigned ? RR_CLR : RR_CR); 1136 tcg_out_insn_RR(s, op, r1, c2); 1137 } else { 1138 op = (is_unsigned ? RRE_CLGR : RRE_CGR); 1139 tcg_out_insn_RRE(s, op, r1, c2); 1140 } 1141 1142 exit: 1143 return tcg_cond_to_s390_cond[c]; 1144} 1145 1146static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, 1147 TCGReg dest, TCGReg c1, TCGArg c2, int c2const) 1148{ 1149 int cc; 1150 bool have_loc; 1151 1152 /* With LOC2, we can always emit the minimum 3 insns. */ 1153 if (s390_facilities & FACILITY_LOAD_ON_COND2) { 1154 /* Emit: d = 0, d = (cc ? 1 : d). */ 1155 cc = tgen_cmp(s, type, cond, c1, c2, c2const, false); 1156 tcg_out_movi(s, TCG_TYPE_I64, dest, 0); 1157 tcg_out_insn(s, RIE, LOCGHI, dest, 1, cc); 1158 return; 1159 } 1160 1161 have_loc = (s390_facilities & FACILITY_LOAD_ON_COND) != 0; 1162 1163 /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller. */ 1164 restart: 1165 switch (cond) { 1166 case TCG_COND_NE: 1167 /* X != 0 is X > 0. */ 1168 if (c2const && c2 == 0) { 1169 cond = TCG_COND_GTU; 1170 } else { 1171 break; 1172 } 1173 /* fallthru */ 1174 1175 case TCG_COND_GTU: 1176 case TCG_COND_GT: 1177 /* The result of a compare has CC=2 for GT and CC=3 unused. 1178 ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */ 1179 tgen_cmp(s, type, cond, c1, c2, c2const, true); 1180 tcg_out_movi(s, type, dest, 0); 1181 tcg_out_insn(s, RRE, ALCGR, dest, dest); 1182 return; 1183 1184 case TCG_COND_EQ: 1185 /* X == 0 is X <= 0. */ 1186 if (c2const && c2 == 0) { 1187 cond = TCG_COND_LEU; 1188 } else { 1189 break; 1190 } 1191 /* fallthru */ 1192 1193 case TCG_COND_LEU: 1194 case TCG_COND_LE: 1195 /* As above, but we're looking for borrow, or !carry. 1196 The second insn computes d - d - borrow, or -1 for true 1197 and 0 for false. So we must mask to 1 bit afterward. */ 1198 tgen_cmp(s, type, cond, c1, c2, c2const, true); 1199 tcg_out_insn(s, RRE, SLBGR, dest, dest); 1200 tgen_andi(s, type, dest, 1); 1201 return; 1202 1203 case TCG_COND_GEU: 1204 case TCG_COND_LTU: 1205 case TCG_COND_LT: 1206 case TCG_COND_GE: 1207 /* Swap operands so that we can use LEU/GTU/GT/LE. */ 1208 if (c2const) { 1209 if (have_loc) { 1210 break; 1211 } 1212 tcg_out_movi(s, type, TCG_TMP0, c2); 1213 c2 = c1; 1214 c2const = 0; 1215 c1 = TCG_TMP0; 1216 } else { 1217 TCGReg t = c1; 1218 c1 = c2; 1219 c2 = t; 1220 } 1221 cond = tcg_swap_cond(cond); 1222 goto restart; 1223 1224 default: 1225 g_assert_not_reached(); 1226 } 1227 1228 cc = tgen_cmp(s, type, cond, c1, c2, c2const, false); 1229 if (have_loc) { 1230 /* Emit: d = 0, t = 1, d = (cc ? t : d). */ 1231 tcg_out_movi(s, TCG_TYPE_I64, dest, 0); 1232 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1); 1233 tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc); 1234 } else { 1235 /* Emit: d = 1; if (cc) goto over; d = 0; over: */ 1236 tcg_out_movi(s, type, dest, 1); 1237 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1); 1238 tcg_out_movi(s, type, dest, 0); 1239 } 1240} 1241 1242static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest, 1243 TCGReg c1, TCGArg c2, int c2const, 1244 TCGArg v3, int v3const) 1245{ 1246 int cc; 1247 if (s390_facilities & FACILITY_LOAD_ON_COND) { 1248 cc = tgen_cmp(s, type, c, c1, c2, c2const, false); 1249 if (v3const) { 1250 tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc); 1251 } else { 1252 tcg_out_insn(s, RRF, LOCGR, dest, v3, cc); 1253 } 1254 } else { 1255 c = tcg_invert_cond(c); 1256 cc = tgen_cmp(s, type, c, c1, c2, c2const, false); 1257 1258 /* Emit: if (cc) goto over; dest = r3; over: */ 1259 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1); 1260 tcg_out_insn(s, RRE, LGR, dest, v3); 1261 } 1262} 1263 1264static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1, 1265 TCGArg a2, int a2const) 1266{ 1267 /* Since this sets both R and R+1, we have no choice but to store the 1268 result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */ 1269 QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1); 1270 tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1); 1271 1272 if (a2const && a2 == 64) { 1273 tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0); 1274 } else { 1275 if (a2const) { 1276 tcg_out_movi(s, TCG_TYPE_I64, dest, a2); 1277 } else { 1278 tcg_out_mov(s, TCG_TYPE_I64, dest, a2); 1279 } 1280 if (s390_facilities & FACILITY_LOAD_ON_COND) { 1281 /* Emit: if (one bit found) dest = r0. */ 1282 tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2); 1283 } else { 1284 /* Emit: if (no one bit found) goto over; dest = r0; over: */ 1285 tcg_out_insn(s, RI, BRC, 8, (4 + 4) >> 1); 1286 tcg_out_insn(s, RRE, LGR, dest, TCG_REG_R0); 1287 } 1288 } 1289} 1290 1291static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src, 1292 int ofs, int len, int z) 1293{ 1294 int lsb = (63 - ofs); 1295 int msb = lsb - (len - 1); 1296 tcg_out_risbg(s, dest, src, msb, lsb, ofs, z); 1297} 1298 1299static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src, 1300 int ofs, int len) 1301{ 1302 tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1); 1303} 1304 1305static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest) 1306{ 1307 ptrdiff_t off = dest - s->code_ptr; 1308 if (off == (int16_t)off) { 1309 tcg_out_insn(s, RI, BRC, cc, off); 1310 } else if (off == (int32_t)off) { 1311 tcg_out_insn(s, RIL, BRCL, cc, off); 1312 } else { 1313 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest); 1314 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0); 1315 } 1316} 1317 1318static void tgen_branch(TCGContext *s, int cc, TCGLabel *l) 1319{ 1320 if (l->has_value) { 1321 tgen_gotoi(s, cc, l->u.value_ptr); 1322 } else if (USE_LONG_BRANCHES) { 1323 tcg_out16(s, RIL_BRCL | (cc << 4)); 1324 tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, 2); 1325 s->code_ptr += 2; 1326 } else { 1327 tcg_out16(s, RI_BRC | (cc << 4)); 1328 tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2); 1329 s->code_ptr += 1; 1330 } 1331} 1332 1333static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc, 1334 TCGReg r1, TCGReg r2, TCGLabel *l) 1335{ 1336 intptr_t off = 0; 1337 1338 if (l->has_value) { 1339 off = l->u.value_ptr - s->code_ptr; 1340 tcg_debug_assert(off == (int16_t)off); 1341 } else { 1342 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2); 1343 } 1344 1345 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2); 1346 tcg_out16(s, off); 1347 tcg_out16(s, cc << 12 | (opc & 0xff)); 1348} 1349 1350static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc, 1351 TCGReg r1, int i2, TCGLabel *l) 1352{ 1353 tcg_target_long off = 0; 1354 1355 if (l->has_value) { 1356 off = l->u.value_ptr - s->code_ptr; 1357 tcg_debug_assert(off == (int16_t)off); 1358 } else { 1359 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2); 1360 } 1361 1362 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc); 1363 tcg_out16(s, off); 1364 tcg_out16(s, (i2 << 8) | (opc & 0xff)); 1365} 1366 1367static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c, 1368 TCGReg r1, TCGArg c2, int c2const, TCGLabel *l) 1369{ 1370 int cc; 1371 1372 if (s390_facilities & FACILITY_GEN_INST_EXT) { 1373 bool is_unsigned = is_unsigned_cond(c); 1374 bool in_range; 1375 S390Opcode opc; 1376 1377 cc = tcg_cond_to_s390_cond[c]; 1378 1379 if (!c2const) { 1380 opc = (type == TCG_TYPE_I32 1381 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ) 1382 : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ)); 1383 tgen_compare_branch(s, opc, cc, r1, c2, l); 1384 return; 1385 } 1386 1387 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field. 1388 If the immediate we've been given does not fit that range, we'll 1389 fall back to separate compare and branch instructions using the 1390 larger comparison range afforded by COMPARE IMMEDIATE. */ 1391 if (type == TCG_TYPE_I32) { 1392 if (is_unsigned) { 1393 opc = RIE_CLIJ; 1394 in_range = (uint32_t)c2 == (uint8_t)c2; 1395 } else { 1396 opc = RIE_CIJ; 1397 in_range = (int32_t)c2 == (int8_t)c2; 1398 } 1399 } else { 1400 if (is_unsigned) { 1401 opc = RIE_CLGIJ; 1402 in_range = (uint64_t)c2 == (uint8_t)c2; 1403 } else { 1404 opc = RIE_CGIJ; 1405 in_range = (int64_t)c2 == (int8_t)c2; 1406 } 1407 } 1408 if (in_range) { 1409 tgen_compare_imm_branch(s, opc, cc, r1, c2, l); 1410 return; 1411 } 1412 } 1413 1414 cc = tgen_cmp(s, type, c, r1, c2, c2const, false); 1415 tgen_branch(s, cc, l); 1416} 1417 1418static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest) 1419{ 1420 ptrdiff_t off = dest - s->code_ptr; 1421 if (off == (int32_t)off) { 1422 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off); 1423 } else { 1424 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest); 1425 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0); 1426 } 1427} 1428 1429static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data, 1430 TCGReg base, TCGReg index, int disp) 1431{ 1432 switch (opc & (MO_SSIZE | MO_BSWAP)) { 1433 case MO_UB: 1434 tcg_out_insn(s, RXY, LLGC, data, base, index, disp); 1435 break; 1436 case MO_SB: 1437 tcg_out_insn(s, RXY, LGB, data, base, index, disp); 1438 break; 1439 1440 case MO_UW | MO_BSWAP: 1441 /* swapped unsigned halfword load with upper bits zeroed */ 1442 tcg_out_insn(s, RXY, LRVH, data, base, index, disp); 1443 tgen_ext16u(s, TCG_TYPE_I64, data, data); 1444 break; 1445 case MO_UW: 1446 tcg_out_insn(s, RXY, LLGH, data, base, index, disp); 1447 break; 1448 1449 case MO_SW | MO_BSWAP: 1450 /* swapped sign-extended halfword load */ 1451 tcg_out_insn(s, RXY, LRVH, data, base, index, disp); 1452 tgen_ext16s(s, TCG_TYPE_I64, data, data); 1453 break; 1454 case MO_SW: 1455 tcg_out_insn(s, RXY, LGH, data, base, index, disp); 1456 break; 1457 1458 case MO_UL | MO_BSWAP: 1459 /* swapped unsigned int load with upper bits zeroed */ 1460 tcg_out_insn(s, RXY, LRV, data, base, index, disp); 1461 tgen_ext32u(s, data, data); 1462 break; 1463 case MO_UL: 1464 tcg_out_insn(s, RXY, LLGF, data, base, index, disp); 1465 break; 1466 1467 case MO_SL | MO_BSWAP: 1468 /* swapped sign-extended int load */ 1469 tcg_out_insn(s, RXY, LRV, data, base, index, disp); 1470 tgen_ext32s(s, data, data); 1471 break; 1472 case MO_SL: 1473 tcg_out_insn(s, RXY, LGF, data, base, index, disp); 1474 break; 1475 1476 case MO_Q | MO_BSWAP: 1477 tcg_out_insn(s, RXY, LRVG, data, base, index, disp); 1478 break; 1479 case MO_Q: 1480 tcg_out_insn(s, RXY, LG, data, base, index, disp); 1481 break; 1482 1483 default: 1484 tcg_abort(); 1485 } 1486} 1487 1488static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data, 1489 TCGReg base, TCGReg index, int disp) 1490{ 1491 switch (opc & (MO_SIZE | MO_BSWAP)) { 1492 case MO_UB: 1493 if (disp >= 0 && disp < 0x1000) { 1494 tcg_out_insn(s, RX, STC, data, base, index, disp); 1495 } else { 1496 tcg_out_insn(s, RXY, STCY, data, base, index, disp); 1497 } 1498 break; 1499 1500 case MO_UW | MO_BSWAP: 1501 tcg_out_insn(s, RXY, STRVH, data, base, index, disp); 1502 break; 1503 case MO_UW: 1504 if (disp >= 0 && disp < 0x1000) { 1505 tcg_out_insn(s, RX, STH, data, base, index, disp); 1506 } else { 1507 tcg_out_insn(s, RXY, STHY, data, base, index, disp); 1508 } 1509 break; 1510 1511 case MO_UL | MO_BSWAP: 1512 tcg_out_insn(s, RXY, STRV, data, base, index, disp); 1513 break; 1514 case MO_UL: 1515 if (disp >= 0 && disp < 0x1000) { 1516 tcg_out_insn(s, RX, ST, data, base, index, disp); 1517 } else { 1518 tcg_out_insn(s, RXY, STY, data, base, index, disp); 1519 } 1520 break; 1521 1522 case MO_Q | MO_BSWAP: 1523 tcg_out_insn(s, RXY, STRVG, data, base, index, disp); 1524 break; 1525 case MO_Q: 1526 tcg_out_insn(s, RXY, STG, data, base, index, disp); 1527 break; 1528 1529 default: 1530 tcg_abort(); 1531 } 1532} 1533 1534#if defined(CONFIG_SOFTMMU) 1535#include "../tcg-ldst.c.inc" 1536 1537/* We're expecting to use a 20-bit negative offset on the tlb memory ops. */ 1538QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); 1539QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19)); 1540 1541/* Load and compare a TLB entry, leaving the flags set. Loads the TLB 1542 addend into R2. Returns a register with the santitized guest address. */ 1543static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, 1544 int mem_index, bool is_ld) 1545{ 1546 unsigned s_bits = opc & MO_SIZE; 1547 unsigned a_bits = get_alignment_bits(opc); 1548 unsigned s_mask = (1 << s_bits) - 1; 1549 unsigned a_mask = (1 << a_bits) - 1; 1550 int fast_off = TLB_MASK_TABLE_OFS(mem_index); 1551 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); 1552 int table_off = fast_off + offsetof(CPUTLBDescFast, table); 1553 int ofs, a_off; 1554 uint64_t tlb_mask; 1555 1556 tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE, 1557 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); 1558 tcg_out_insn(s, RXY, NG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, mask_off); 1559 tcg_out_insn(s, RXY, AG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, table_off); 1560 1561 /* For aligned accesses, we check the first byte and include the alignment 1562 bits within the address. For unaligned access, we check that we don't 1563 cross pages using the address of the last byte of the access. */ 1564 a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask); 1565 tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; 1566 if ((s390_facilities & FACILITY_GEN_INST_EXT) && a_off == 0) { 1567 tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask); 1568 } else { 1569 tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off); 1570 tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask); 1571 } 1572 1573 if (is_ld) { 1574 ofs = offsetof(CPUTLBEntry, addr_read); 1575 } else { 1576 ofs = offsetof(CPUTLBEntry, addr_write); 1577 } 1578 if (TARGET_LONG_BITS == 32) { 1579 tcg_out_insn(s, RX, C, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs); 1580 } else { 1581 tcg_out_insn(s, RXY, CG, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs); 1582 } 1583 1584 tcg_out_insn(s, RXY, LG, TCG_REG_R2, TCG_REG_R2, TCG_REG_NONE, 1585 offsetof(CPUTLBEntry, addend)); 1586 1587 if (TARGET_LONG_BITS == 32) { 1588 tgen_ext32u(s, TCG_REG_R3, addr_reg); 1589 return TCG_REG_R3; 1590 } 1591 return addr_reg; 1592} 1593 1594static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, 1595 TCGReg data, TCGReg addr, 1596 tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) 1597{ 1598 TCGLabelQemuLdst *label = new_ldst_label(s); 1599 1600 label->is_ld = is_ld; 1601 label->oi = oi; 1602 label->datalo_reg = data; 1603 label->addrlo_reg = addr; 1604 label->raddr = raddr; 1605 label->label_ptr[0] = label_ptr; 1606} 1607 1608static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) 1609{ 1610 TCGReg addr_reg = lb->addrlo_reg; 1611 TCGReg data_reg = lb->datalo_reg; 1612 TCGMemOpIdx oi = lb->oi; 1613 MemOp opc = get_memop(oi); 1614 1615 if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, 1616 (intptr_t)s->code_ptr, 2)) { 1617 return false; 1618 } 1619 1620 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0); 1621 if (TARGET_LONG_BITS == 64) { 1622 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg); 1623 } 1624 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi); 1625 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr); 1626 tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]); 1627 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2); 1628 1629 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); 1630 return true; 1631} 1632 1633static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) 1634{ 1635 TCGReg addr_reg = lb->addrlo_reg; 1636 TCGReg data_reg = lb->datalo_reg; 1637 TCGMemOpIdx oi = lb->oi; 1638 MemOp opc = get_memop(oi); 1639 1640 if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, 1641 (intptr_t)s->code_ptr, 2)) { 1642 return false; 1643 } 1644 1645 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0); 1646 if (TARGET_LONG_BITS == 64) { 1647 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg); 1648 } 1649 switch (opc & MO_SIZE) { 1650 case MO_UB: 1651 tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg); 1652 break; 1653 case MO_UW: 1654 tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg); 1655 break; 1656 case MO_UL: 1657 tgen_ext32u(s, TCG_REG_R4, data_reg); 1658 break; 1659 case MO_Q: 1660 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg); 1661 break; 1662 default: 1663 tcg_abort(); 1664 } 1665 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi); 1666 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr); 1667 tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); 1668 1669 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); 1670 return true; 1671} 1672#else 1673static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, 1674 TCGReg *index_reg, tcg_target_long *disp) 1675{ 1676 if (TARGET_LONG_BITS == 32) { 1677 tgen_ext32u(s, TCG_TMP0, *addr_reg); 1678 *addr_reg = TCG_TMP0; 1679 } 1680 if (guest_base < 0x80000) { 1681 *index_reg = TCG_REG_NONE; 1682 *disp = guest_base; 1683 } else { 1684 *index_reg = TCG_GUEST_BASE_REG; 1685 *disp = 0; 1686 } 1687} 1688#endif /* CONFIG_SOFTMMU */ 1689 1690static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, 1691 TCGMemOpIdx oi) 1692{ 1693 MemOp opc = get_memop(oi); 1694#ifdef CONFIG_SOFTMMU 1695 unsigned mem_index = get_mmuidx(oi); 1696 tcg_insn_unit *label_ptr; 1697 TCGReg base_reg; 1698 1699 base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1); 1700 1701 tcg_out16(s, RI_BRC | (S390_CC_NE << 4)); 1702 label_ptr = s->code_ptr; 1703 s->code_ptr += 1; 1704 1705 tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0); 1706 1707 add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr); 1708#else 1709 TCGReg index_reg; 1710 tcg_target_long disp; 1711 1712 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp); 1713 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp); 1714#endif 1715} 1716 1717static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, 1718 TCGMemOpIdx oi) 1719{ 1720 MemOp opc = get_memop(oi); 1721#ifdef CONFIG_SOFTMMU 1722 unsigned mem_index = get_mmuidx(oi); 1723 tcg_insn_unit *label_ptr; 1724 TCGReg base_reg; 1725 1726 base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0); 1727 1728 tcg_out16(s, RI_BRC | (S390_CC_NE << 4)); 1729 label_ptr = s->code_ptr; 1730 s->code_ptr += 1; 1731 1732 tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0); 1733 1734 add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr); 1735#else 1736 TCGReg index_reg; 1737 tcg_target_long disp; 1738 1739 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp); 1740 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp); 1741#endif 1742} 1743 1744# define OP_32_64(x) \ 1745 case glue(glue(INDEX_op_,x),_i32): \ 1746 case glue(glue(INDEX_op_,x),_i64) 1747 1748static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, 1749 const TCGArg *args, const int *const_args) 1750{ 1751 S390Opcode op, op2; 1752 TCGArg a0, a1, a2; 1753 1754 switch (opc) { 1755 case INDEX_op_exit_tb: 1756 /* Reuse the zeroing that exists for goto_ptr. */ 1757 a0 = args[0]; 1758 if (a0 == 0) { 1759 tgen_gotoi(s, S390_CC_ALWAYS, s->code_gen_epilogue); 1760 } else { 1761 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0); 1762 tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr); 1763 } 1764 break; 1765 1766 case INDEX_op_goto_tb: 1767 a0 = args[0]; 1768 if (s->tb_jmp_insn_offset) { 1769 /* branch displacement must be aligned for atomic patching; 1770 * see if we need to add extra nop before branch 1771 */ 1772 if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) { 1773 tcg_out16(s, NOP); 1774 } 1775 tcg_debug_assert(!USE_REG_TB); 1776 tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4)); 1777 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); 1778 s->code_ptr += 2; 1779 } else { 1780 /* load address stored at s->tb_jmp_target_addr + a0 */ 1781 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_REG_TB, 1782 s->tb_jmp_target_addr + a0); 1783 /* and go there */ 1784 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB); 1785 } 1786 set_jmp_reset_offset(s, a0); 1787 1788 /* For the unlinked path of goto_tb, we need to reset 1789 TCG_REG_TB to the beginning of this TB. */ 1790 if (USE_REG_TB) { 1791 int ofs = -tcg_current_code_size(s); 1792 assert(ofs == (int16_t)ofs); 1793 tcg_out_insn(s, RI, AGHI, TCG_REG_TB, ofs); 1794 } 1795 break; 1796 1797 case INDEX_op_goto_ptr: 1798 a0 = args[0]; 1799 if (USE_REG_TB) { 1800 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0); 1801 } 1802 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0); 1803 break; 1804 1805 OP_32_64(ld8u): 1806 /* ??? LLC (RXY format) is only present with the extended-immediate 1807 facility, whereas LLGC is always present. */ 1808 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]); 1809 break; 1810 1811 OP_32_64(ld8s): 1812 /* ??? LB is no smaller than LGB, so no point to using it. */ 1813 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]); 1814 break; 1815 1816 OP_32_64(ld16u): 1817 /* ??? LLH (RXY format) is only present with the extended-immediate 1818 facility, whereas LLGH is always present. */ 1819 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]); 1820 break; 1821 1822 case INDEX_op_ld16s_i32: 1823 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]); 1824 break; 1825 1826 case INDEX_op_ld_i32: 1827 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]); 1828 break; 1829 1830 OP_32_64(st8): 1831 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1], 1832 TCG_REG_NONE, args[2]); 1833 break; 1834 1835 OP_32_64(st16): 1836 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1], 1837 TCG_REG_NONE, args[2]); 1838 break; 1839 1840 case INDEX_op_st_i32: 1841 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]); 1842 break; 1843 1844 case INDEX_op_add_i32: 1845 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; 1846 if (const_args[2]) { 1847 do_addi_32: 1848 if (a0 == a1) { 1849 if (a2 == (int16_t)a2) { 1850 tcg_out_insn(s, RI, AHI, a0, a2); 1851 break; 1852 } 1853 if (s390_facilities & FACILITY_EXT_IMM) { 1854 tcg_out_insn(s, RIL, AFI, a0, a2); 1855 break; 1856 } 1857 } 1858 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2); 1859 } else if (a0 == a1) { 1860 tcg_out_insn(s, RR, AR, a0, a2); 1861 } else { 1862 tcg_out_insn(s, RX, LA, a0, a1, a2, 0); 1863 } 1864 break; 1865 case INDEX_op_sub_i32: 1866 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; 1867 if (const_args[2]) { 1868 a2 = -a2; 1869 goto do_addi_32; 1870 } else if (a0 == a1) { 1871 tcg_out_insn(s, RR, SR, a0, a2); 1872 } else { 1873 tcg_out_insn(s, RRF, SRK, a0, a1, a2); 1874 } 1875 break; 1876 1877 case INDEX_op_and_i32: 1878 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; 1879 if (const_args[2]) { 1880 tcg_out_mov(s, TCG_TYPE_I32, a0, a1); 1881 tgen_andi(s, TCG_TYPE_I32, a0, a2); 1882 } else if (a0 == a1) { 1883 tcg_out_insn(s, RR, NR, a0, a2); 1884 } else { 1885 tcg_out_insn(s, RRF, NRK, a0, a1, a2); 1886 } 1887 break; 1888 case INDEX_op_or_i32: 1889 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; 1890 if (const_args[2]) { 1891 tcg_out_mov(s, TCG_TYPE_I32, a0, a1); 1892 tgen_ori(s, TCG_TYPE_I32, a0, a2); 1893 } else if (a0 == a1) { 1894 tcg_out_insn(s, RR, OR, a0, a2); 1895 } else { 1896 tcg_out_insn(s, RRF, ORK, a0, a1, a2); 1897 } 1898 break; 1899 case INDEX_op_xor_i32: 1900 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; 1901 if (const_args[2]) { 1902 tcg_out_mov(s, TCG_TYPE_I32, a0, a1); 1903 tgen_xori(s, TCG_TYPE_I32, a0, a2); 1904 } else if (a0 == a1) { 1905 tcg_out_insn(s, RR, XR, args[0], args[2]); 1906 } else { 1907 tcg_out_insn(s, RRF, XRK, a0, a1, a2); 1908 } 1909 break; 1910 1911 case INDEX_op_neg_i32: 1912 tcg_out_insn(s, RR, LCR, args[0], args[1]); 1913 break; 1914 1915 case INDEX_op_mul_i32: 1916 if (const_args[2]) { 1917 if ((int32_t)args[2] == (int16_t)args[2]) { 1918 tcg_out_insn(s, RI, MHI, args[0], args[2]); 1919 } else { 1920 tcg_out_insn(s, RIL, MSFI, args[0], args[2]); 1921 } 1922 } else { 1923 tcg_out_insn(s, RRE, MSR, args[0], args[2]); 1924 } 1925 break; 1926 1927 case INDEX_op_div2_i32: 1928 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]); 1929 break; 1930 case INDEX_op_divu2_i32: 1931 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]); 1932 break; 1933 1934 case INDEX_op_shl_i32: 1935 op = RS_SLL; 1936 op2 = RSY_SLLK; 1937 do_shift32: 1938 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; 1939 if (a0 == a1) { 1940 if (const_args[2]) { 1941 tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2); 1942 } else { 1943 tcg_out_sh32(s, op, a0, a2, 0); 1944 } 1945 } else { 1946 /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */ 1947 if (const_args[2]) { 1948 tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2); 1949 } else { 1950 tcg_out_sh64(s, op2, a0, a1, a2, 0); 1951 } 1952 } 1953 break; 1954 case INDEX_op_shr_i32: 1955 op = RS_SRL; 1956 op2 = RSY_SRLK; 1957 goto do_shift32; 1958 case INDEX_op_sar_i32: 1959 op = RS_SRA; 1960 op2 = RSY_SRAK; 1961 goto do_shift32; 1962 1963 case INDEX_op_rotl_i32: 1964 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */ 1965 if (const_args[2]) { 1966 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]); 1967 } else { 1968 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0); 1969 } 1970 break; 1971 case INDEX_op_rotr_i32: 1972 if (const_args[2]) { 1973 tcg_out_sh64(s, RSY_RLL, args[0], args[1], 1974 TCG_REG_NONE, (32 - args[2]) & 31); 1975 } else { 1976 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]); 1977 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0); 1978 } 1979 break; 1980 1981 case INDEX_op_ext8s_i32: 1982 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]); 1983 break; 1984 case INDEX_op_ext16s_i32: 1985 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]); 1986 break; 1987 case INDEX_op_ext8u_i32: 1988 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]); 1989 break; 1990 case INDEX_op_ext16u_i32: 1991 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]); 1992 break; 1993 1994 OP_32_64(bswap16): 1995 /* The TCG bswap definition requires bits 0-47 already be zero. 1996 Thus we don't need the G-type insns to implement bswap16_i64. */ 1997 tcg_out_insn(s, RRE, LRVR, args[0], args[1]); 1998 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16); 1999 break; 2000 OP_32_64(bswap32): 2001 tcg_out_insn(s, RRE, LRVR, args[0], args[1]); 2002 break; 2003 2004 case INDEX_op_add2_i32: 2005 if (const_args[4]) { 2006 tcg_out_insn(s, RIL, ALFI, args[0], args[4]); 2007 } else { 2008 tcg_out_insn(s, RR, ALR, args[0], args[4]); 2009 } 2010 tcg_out_insn(s, RRE, ALCR, args[1], args[5]); 2011 break; 2012 case INDEX_op_sub2_i32: 2013 if (const_args[4]) { 2014 tcg_out_insn(s, RIL, SLFI, args[0], args[4]); 2015 } else { 2016 tcg_out_insn(s, RR, SLR, args[0], args[4]); 2017 } 2018 tcg_out_insn(s, RRE, SLBR, args[1], args[5]); 2019 break; 2020 2021 case INDEX_op_br: 2022 tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0])); 2023 break; 2024 2025 case INDEX_op_brcond_i32: 2026 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0], 2027 args[1], const_args[1], arg_label(args[3])); 2028 break; 2029 case INDEX_op_setcond_i32: 2030 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], 2031 args[2], const_args[2]); 2032 break; 2033 case INDEX_op_movcond_i32: 2034 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], 2035 args[2], const_args[2], args[3], const_args[3]); 2036 break; 2037 2038 case INDEX_op_qemu_ld_i32: 2039 /* ??? Technically we can use a non-extending instruction. */ 2040 case INDEX_op_qemu_ld_i64: 2041 tcg_out_qemu_ld(s, args[0], args[1], args[2]); 2042 break; 2043 case INDEX_op_qemu_st_i32: 2044 case INDEX_op_qemu_st_i64: 2045 tcg_out_qemu_st(s, args[0], args[1], args[2]); 2046 break; 2047 2048 case INDEX_op_ld16s_i64: 2049 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]); 2050 break; 2051 case INDEX_op_ld32u_i64: 2052 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]); 2053 break; 2054 case INDEX_op_ld32s_i64: 2055 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]); 2056 break; 2057 case INDEX_op_ld_i64: 2058 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]); 2059 break; 2060 2061 case INDEX_op_st32_i64: 2062 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]); 2063 break; 2064 case INDEX_op_st_i64: 2065 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]); 2066 break; 2067 2068 case INDEX_op_add_i64: 2069 a0 = args[0], a1 = args[1], a2 = args[2]; 2070 if (const_args[2]) { 2071 do_addi_64: 2072 if (a0 == a1) { 2073 if (a2 == (int16_t)a2) { 2074 tcg_out_insn(s, RI, AGHI, a0, a2); 2075 break; 2076 } 2077 if (s390_facilities & FACILITY_EXT_IMM) { 2078 if (a2 == (int32_t)a2) { 2079 tcg_out_insn(s, RIL, AGFI, a0, a2); 2080 break; 2081 } else if (a2 == (uint32_t)a2) { 2082 tcg_out_insn(s, RIL, ALGFI, a0, a2); 2083 break; 2084 } else if (-a2 == (uint32_t)-a2) { 2085 tcg_out_insn(s, RIL, SLGFI, a0, -a2); 2086 break; 2087 } 2088 } 2089 } 2090 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2); 2091 } else if (a0 == a1) { 2092 tcg_out_insn(s, RRE, AGR, a0, a2); 2093 } else { 2094 tcg_out_insn(s, RX, LA, a0, a1, a2, 0); 2095 } 2096 break; 2097 case INDEX_op_sub_i64: 2098 a0 = args[0], a1 = args[1], a2 = args[2]; 2099 if (const_args[2]) { 2100 a2 = -a2; 2101 goto do_addi_64; 2102 } else if (a0 == a1) { 2103 tcg_out_insn(s, RRE, SGR, a0, a2); 2104 } else { 2105 tcg_out_insn(s, RRF, SGRK, a0, a1, a2); 2106 } 2107 break; 2108 2109 case INDEX_op_and_i64: 2110 a0 = args[0], a1 = args[1], a2 = args[2]; 2111 if (const_args[2]) { 2112 tcg_out_mov(s, TCG_TYPE_I64, a0, a1); 2113 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]); 2114 } else if (a0 == a1) { 2115 tcg_out_insn(s, RRE, NGR, args[0], args[2]); 2116 } else { 2117 tcg_out_insn(s, RRF, NGRK, a0, a1, a2); 2118 } 2119 break; 2120 case INDEX_op_or_i64: 2121 a0 = args[0], a1 = args[1], a2 = args[2]; 2122 if (const_args[2]) { 2123 tcg_out_mov(s, TCG_TYPE_I64, a0, a1); 2124 tgen_ori(s, TCG_TYPE_I64, a0, a2); 2125 } else if (a0 == a1) { 2126 tcg_out_insn(s, RRE, OGR, a0, a2); 2127 } else { 2128 tcg_out_insn(s, RRF, OGRK, a0, a1, a2); 2129 } 2130 break; 2131 case INDEX_op_xor_i64: 2132 a0 = args[0], a1 = args[1], a2 = args[2]; 2133 if (const_args[2]) { 2134 tcg_out_mov(s, TCG_TYPE_I64, a0, a1); 2135 tgen_xori(s, TCG_TYPE_I64, a0, a2); 2136 } else if (a0 == a1) { 2137 tcg_out_insn(s, RRE, XGR, a0, a2); 2138 } else { 2139 tcg_out_insn(s, RRF, XGRK, a0, a1, a2); 2140 } 2141 break; 2142 2143 case INDEX_op_neg_i64: 2144 tcg_out_insn(s, RRE, LCGR, args[0], args[1]); 2145 break; 2146 case INDEX_op_bswap64_i64: 2147 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]); 2148 break; 2149 2150 case INDEX_op_mul_i64: 2151 if (const_args[2]) { 2152 if (args[2] == (int16_t)args[2]) { 2153 tcg_out_insn(s, RI, MGHI, args[0], args[2]); 2154 } else { 2155 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]); 2156 } 2157 } else { 2158 tcg_out_insn(s, RRE, MSGR, args[0], args[2]); 2159 } 2160 break; 2161 2162 case INDEX_op_div2_i64: 2163 /* ??? We get an unnecessary sign-extension of the dividend 2164 into R3 with this definition, but as we do in fact always 2165 produce both quotient and remainder using INDEX_op_div_i64 2166 instead requires jumping through even more hoops. */ 2167 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]); 2168 break; 2169 case INDEX_op_divu2_i64: 2170 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]); 2171 break; 2172 case INDEX_op_mulu2_i64: 2173 tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]); 2174 break; 2175 2176 case INDEX_op_shl_i64: 2177 op = RSY_SLLG; 2178 do_shift64: 2179 if (const_args[2]) { 2180 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]); 2181 } else { 2182 tcg_out_sh64(s, op, args[0], args[1], args[2], 0); 2183 } 2184 break; 2185 case INDEX_op_shr_i64: 2186 op = RSY_SRLG; 2187 goto do_shift64; 2188 case INDEX_op_sar_i64: 2189 op = RSY_SRAG; 2190 goto do_shift64; 2191 2192 case INDEX_op_rotl_i64: 2193 if (const_args[2]) { 2194 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], 2195 TCG_REG_NONE, args[2]); 2196 } else { 2197 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0); 2198 } 2199 break; 2200 case INDEX_op_rotr_i64: 2201 if (const_args[2]) { 2202 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], 2203 TCG_REG_NONE, (64 - args[2]) & 63); 2204 } else { 2205 /* We can use the smaller 32-bit negate because only the 2206 low 6 bits are examined for the rotate. */ 2207 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]); 2208 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0); 2209 } 2210 break; 2211 2212 case INDEX_op_ext8s_i64: 2213 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]); 2214 break; 2215 case INDEX_op_ext16s_i64: 2216 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]); 2217 break; 2218 case INDEX_op_ext_i32_i64: 2219 case INDEX_op_ext32s_i64: 2220 tgen_ext32s(s, args[0], args[1]); 2221 break; 2222 case INDEX_op_ext8u_i64: 2223 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]); 2224 break; 2225 case INDEX_op_ext16u_i64: 2226 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]); 2227 break; 2228 case INDEX_op_extu_i32_i64: 2229 case INDEX_op_ext32u_i64: 2230 tgen_ext32u(s, args[0], args[1]); 2231 break; 2232 2233 case INDEX_op_add2_i64: 2234 if (const_args[4]) { 2235 if ((int64_t)args[4] >= 0) { 2236 tcg_out_insn(s, RIL, ALGFI, args[0], args[4]); 2237 } else { 2238 tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]); 2239 } 2240 } else { 2241 tcg_out_insn(s, RRE, ALGR, args[0], args[4]); 2242 } 2243 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]); 2244 break; 2245 case INDEX_op_sub2_i64: 2246 if (const_args[4]) { 2247 if ((int64_t)args[4] >= 0) { 2248 tcg_out_insn(s, RIL, SLGFI, args[0], args[4]); 2249 } else { 2250 tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]); 2251 } 2252 } else { 2253 tcg_out_insn(s, RRE, SLGR, args[0], args[4]); 2254 } 2255 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]); 2256 break; 2257 2258 case INDEX_op_brcond_i64: 2259 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0], 2260 args[1], const_args[1], arg_label(args[3])); 2261 break; 2262 case INDEX_op_setcond_i64: 2263 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], 2264 args[2], const_args[2]); 2265 break; 2266 case INDEX_op_movcond_i64: 2267 tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], 2268 args[2], const_args[2], args[3], const_args[3]); 2269 break; 2270 2271 OP_32_64(deposit): 2272 a0 = args[0], a1 = args[1], a2 = args[2]; 2273 if (const_args[1]) { 2274 tgen_deposit(s, a0, a2, args[3], args[4], 1); 2275 } else { 2276 /* Since we can't support "0Z" as a constraint, we allow a1 in 2277 any register. Fix things up as if a matching constraint. */ 2278 if (a0 != a1) { 2279 TCGType type = (opc == INDEX_op_deposit_i64); 2280 if (a0 == a2) { 2281 tcg_out_mov(s, type, TCG_TMP0, a2); 2282 a2 = TCG_TMP0; 2283 } 2284 tcg_out_mov(s, type, a0, a1); 2285 } 2286 tgen_deposit(s, a0, a2, args[3], args[4], 0); 2287 } 2288 break; 2289 2290 OP_32_64(extract): 2291 tgen_extract(s, args[0], args[1], args[2], args[3]); 2292 break; 2293 2294 case INDEX_op_clz_i64: 2295 tgen_clz(s, args[0], args[1], args[2], const_args[2]); 2296 break; 2297 2298 case INDEX_op_mb: 2299 /* The host memory model is quite strong, we simply need to 2300 serialize the instruction stream. */ 2301 if (args[0] & TCG_MO_ST_LD) { 2302 tcg_out_insn(s, RR, BCR, 2303 s390_facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0); 2304 } 2305 break; 2306 2307 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ 2308 case INDEX_op_mov_i64: 2309 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ 2310 case INDEX_op_movi_i64: 2311 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 2312 default: 2313 tcg_abort(); 2314 } 2315} 2316 2317static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) 2318{ 2319 static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; 2320 static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; 2321 static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } }; 2322 static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } }; 2323 static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } }; 2324 static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; 2325 static const TCGTargetOpDef r_0_ri = { .args_ct_str = { "r", "0", "ri" } }; 2326 static const TCGTargetOpDef r_0_rI = { .args_ct_str = { "r", "0", "rI" } }; 2327 static const TCGTargetOpDef r_0_rJ = { .args_ct_str = { "r", "0", "rJ" } }; 2328 static const TCGTargetOpDef a2_r 2329 = { .args_ct_str = { "r", "r", "0", "1", "r", "r" } }; 2330 static const TCGTargetOpDef a2_ri 2331 = { .args_ct_str = { "r", "r", "0", "1", "ri", "r" } }; 2332 static const TCGTargetOpDef a2_rA 2333 = { .args_ct_str = { "r", "r", "0", "1", "rA", "r" } }; 2334 2335 switch (op) { 2336 case INDEX_op_goto_ptr: 2337 return &r; 2338 2339 case INDEX_op_ld8u_i32: 2340 case INDEX_op_ld8u_i64: 2341 case INDEX_op_ld8s_i32: 2342 case INDEX_op_ld8s_i64: 2343 case INDEX_op_ld16u_i32: 2344 case INDEX_op_ld16u_i64: 2345 case INDEX_op_ld16s_i32: 2346 case INDEX_op_ld16s_i64: 2347 case INDEX_op_ld_i32: 2348 case INDEX_op_ld32u_i64: 2349 case INDEX_op_ld32s_i64: 2350 case INDEX_op_ld_i64: 2351 case INDEX_op_st8_i32: 2352 case INDEX_op_st8_i64: 2353 case INDEX_op_st16_i32: 2354 case INDEX_op_st16_i64: 2355 case INDEX_op_st_i32: 2356 case INDEX_op_st32_i64: 2357 case INDEX_op_st_i64: 2358 return &r_r; 2359 2360 case INDEX_op_add_i32: 2361 case INDEX_op_add_i64: 2362 return &r_r_ri; 2363 case INDEX_op_sub_i32: 2364 case INDEX_op_sub_i64: 2365 case INDEX_op_and_i32: 2366 case INDEX_op_and_i64: 2367 case INDEX_op_or_i32: 2368 case INDEX_op_or_i64: 2369 case INDEX_op_xor_i32: 2370 case INDEX_op_xor_i64: 2371 return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri); 2372 2373 case INDEX_op_mul_i32: 2374 /* If we have the general-instruction-extensions, then we have 2375 MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we 2376 have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */ 2377 return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_ri : &r_0_rI); 2378 case INDEX_op_mul_i64: 2379 return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_rJ : &r_0_rI); 2380 2381 case INDEX_op_shl_i32: 2382 case INDEX_op_shr_i32: 2383 case INDEX_op_sar_i32: 2384 return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri); 2385 2386 case INDEX_op_shl_i64: 2387 case INDEX_op_shr_i64: 2388 case INDEX_op_sar_i64: 2389 return &r_r_ri; 2390 2391 case INDEX_op_rotl_i32: 2392 case INDEX_op_rotl_i64: 2393 case INDEX_op_rotr_i32: 2394 case INDEX_op_rotr_i64: 2395 return &r_r_ri; 2396 2397 case INDEX_op_brcond_i32: 2398 case INDEX_op_brcond_i64: 2399 return &r_ri; 2400 2401 case INDEX_op_bswap16_i32: 2402 case INDEX_op_bswap16_i64: 2403 case INDEX_op_bswap32_i32: 2404 case INDEX_op_bswap32_i64: 2405 case INDEX_op_bswap64_i64: 2406 case INDEX_op_neg_i32: 2407 case INDEX_op_neg_i64: 2408 case INDEX_op_ext8s_i32: 2409 case INDEX_op_ext8s_i64: 2410 case INDEX_op_ext8u_i32: 2411 case INDEX_op_ext8u_i64: 2412 case INDEX_op_ext16s_i32: 2413 case INDEX_op_ext16s_i64: 2414 case INDEX_op_ext16u_i32: 2415 case INDEX_op_ext16u_i64: 2416 case INDEX_op_ext32s_i64: 2417 case INDEX_op_ext32u_i64: 2418 case INDEX_op_ext_i32_i64: 2419 case INDEX_op_extu_i32_i64: 2420 case INDEX_op_extract_i32: 2421 case INDEX_op_extract_i64: 2422 return &r_r; 2423 2424 case INDEX_op_clz_i64: 2425 case INDEX_op_setcond_i32: 2426 case INDEX_op_setcond_i64: 2427 return &r_r_ri; 2428 2429 case INDEX_op_qemu_ld_i32: 2430 case INDEX_op_qemu_ld_i64: 2431 return &r_L; 2432 case INDEX_op_qemu_st_i64: 2433 case INDEX_op_qemu_st_i32: 2434 return &L_L; 2435 2436 case INDEX_op_deposit_i32: 2437 case INDEX_op_deposit_i64: 2438 { 2439 static const TCGTargetOpDef dep 2440 = { .args_ct_str = { "r", "rZ", "r" } }; 2441 return &dep; 2442 } 2443 case INDEX_op_movcond_i32: 2444 case INDEX_op_movcond_i64: 2445 { 2446 static const TCGTargetOpDef movc 2447 = { .args_ct_str = { "r", "r", "ri", "r", "0" } }; 2448 static const TCGTargetOpDef movc_l 2449 = { .args_ct_str = { "r", "r", "ri", "rI", "0" } }; 2450 return (s390_facilities & FACILITY_LOAD_ON_COND2 ? &movc_l : &movc); 2451 } 2452 case INDEX_op_div2_i32: 2453 case INDEX_op_div2_i64: 2454 case INDEX_op_divu2_i32: 2455 case INDEX_op_divu2_i64: 2456 { 2457 static const TCGTargetOpDef div2 2458 = { .args_ct_str = { "b", "a", "0", "1", "r" } }; 2459 return &div2; 2460 } 2461 case INDEX_op_mulu2_i64: 2462 { 2463 static const TCGTargetOpDef mul2 2464 = { .args_ct_str = { "b", "a", "0", "r" } }; 2465 return &mul2; 2466 } 2467 2468 case INDEX_op_add2_i32: 2469 case INDEX_op_sub2_i32: 2470 return (s390_facilities & FACILITY_EXT_IMM ? &a2_ri : &a2_r); 2471 case INDEX_op_add2_i64: 2472 case INDEX_op_sub2_i64: 2473 return (s390_facilities & FACILITY_EXT_IMM ? &a2_rA : &a2_r); 2474 2475 default: 2476 break; 2477 } 2478 return NULL; 2479} 2480 2481static void query_s390_facilities(void) 2482{ 2483 unsigned long hwcap = qemu_getauxval(AT_HWCAP); 2484 2485 /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this 2486 is present on all 64-bit systems, but let's check for it anyway. */ 2487 if (hwcap & HWCAP_S390_STFLE) { 2488 register int r0 __asm__("0"); 2489 register void *r1 __asm__("1"); 2490 2491 /* stfle 0(%r1) */ 2492 r1 = &s390_facilities; 2493 asm volatile(".word 0xb2b0,0x1000" 2494 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc"); 2495 } 2496} 2497 2498static void tcg_target_init(TCGContext *s) 2499{ 2500 query_s390_facilities(); 2501 2502 tcg_target_available_regs[TCG_TYPE_I32] = 0xffff; 2503 tcg_target_available_regs[TCG_TYPE_I64] = 0xffff; 2504 2505 tcg_target_call_clobber_regs = 0; 2506 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); 2507 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1); 2508 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2); 2509 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3); 2510 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4); 2511 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5); 2512 /* The r6 register is technically call-saved, but it's also a parameter 2513 register, so it can get killed by setup for the qemu_st helper. */ 2514 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6); 2515 /* The return register can be considered call-clobbered. */ 2516 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14); 2517 2518 s->reserved_regs = 0; 2519 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); 2520 /* XXX many insns can't be used with R0, so we better avoid it for now */ 2521 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); 2522 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); 2523 if (USE_REG_TB) { 2524 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); 2525 } 2526} 2527 2528#define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET \ 2529 + TCG_STATIC_CALL_ARGS_SIZE \ 2530 + CPU_TEMP_BUF_NLONGS * sizeof(long))) 2531 2532static void tcg_target_qemu_prologue(TCGContext *s) 2533{ 2534 /* stmg %r6,%r15,48(%r15) (save registers) */ 2535 tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48); 2536 2537 /* aghi %r15,-frame_size */ 2538 tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE); 2539 2540 tcg_set_frame(s, TCG_REG_CALL_STACK, 2541 TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET, 2542 CPU_TEMP_BUF_NLONGS * sizeof(long)); 2543 2544#ifndef CONFIG_SOFTMMU 2545 if (guest_base >= 0x80000) { 2546 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true); 2547 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); 2548 } 2549#endif 2550 2551 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 2552 if (USE_REG_TB) { 2553 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, 2554 tcg_target_call_iarg_regs[1]); 2555 } 2556 2557 /* br %r3 (go to TB) */ 2558 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]); 2559 2560 /* 2561 * Return path for goto_ptr. Set return value to 0, a-la exit_tb, 2562 * and fall through to the rest of the epilogue. 2563 */ 2564 s->code_gen_epilogue = s->code_ptr; 2565 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0); 2566 2567 /* TB epilogue */ 2568 tb_ret_addr = s->code_ptr; 2569 2570 /* lmg %r6,%r15,fs+48(%r15) (restore registers) */ 2571 tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 2572 FRAME_SIZE + 48); 2573 2574 /* br %r14 (return) */ 2575 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14); 2576} 2577 2578static void tcg_out_nop_fill(tcg_insn_unit *p, int count) 2579{ 2580 memset(p, 0x07, count * sizeof(tcg_insn_unit)); 2581} 2582 2583typedef struct { 2584 DebugFrameHeader h; 2585 uint8_t fde_def_cfa[4]; 2586 uint8_t fde_reg_ofs[18]; 2587} DebugFrame; 2588 2589/* We're expecting a 2 byte uleb128 encoded value. */ 2590QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); 2591 2592#define ELF_HOST_MACHINE EM_S390 2593 2594static const DebugFrame debug_frame = { 2595 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ 2596 .h.cie.id = -1, 2597 .h.cie.version = 1, 2598 .h.cie.code_align = 1, 2599 .h.cie.data_align = 8, /* sleb128 8 */ 2600 .h.cie.return_column = TCG_REG_R14, 2601 2602 /* Total FDE size does not include the "len" member. */ 2603 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 2604 2605 .fde_def_cfa = { 2606 12, TCG_REG_CALL_STACK, /* DW_CFA_def_cfa %r15, ... */ 2607 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 2608 (FRAME_SIZE >> 7) 2609 }, 2610 .fde_reg_ofs = { 2611 0x86, 6, /* DW_CFA_offset, %r6, 48 */ 2612 0x87, 7, /* DW_CFA_offset, %r7, 56 */ 2613 0x88, 8, /* DW_CFA_offset, %r8, 64 */ 2614 0x89, 9, /* DW_CFA_offset, %r92, 72 */ 2615 0x8a, 10, /* DW_CFA_offset, %r10, 80 */ 2616 0x8b, 11, /* DW_CFA_offset, %r11, 88 */ 2617 0x8c, 12, /* DW_CFA_offset, %r12, 96 */ 2618 0x8d, 13, /* DW_CFA_offset, %r13, 104 */ 2619 0x8e, 14, /* DW_CFA_offset, %r14, 112 */ 2620 } 2621}; 2622 2623void tcg_register_jit(void *buf, size_t buf_size) 2624{ 2625 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 2626} 2627