dnl Copyright (c) 2014, Red Hat Inc. All rights reserved.
dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
dnl
dnl This code is free software; you can redistribute it and/or modify it
dnl under the terms of the GNU General Public License version 2 only, as
dnl published by the Free Software Foundation.
dnl
dnl This code is distributed in the hope that it will be useful, but WITHOUT
dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
dnl FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
dnl version 2 for more details (a copy is included in the LICENSE file that
dnl accompanied this code).
dnl
dnl You should have received a copy of the GNU General Public License version
dnl 2 along with this work; if not, write to the Free Software Foundation,
dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
dnl
dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
dnl or visit www.oracle.com if you need additional information or have any
dnl questions.
dnl
dnl
dnl Process this file with m4 aarch64_ad.m4 to generate the arithmetic
dnl and shift patterns used in aarch64.ad.
dnl
// BEGIN This section of the file is automatically generated.
// Do not edit --------------
dnl
dnl ORL2I expands to orL2I for the I (32-bit) case so that integer
dnl operands also match values narrowed from long; expands to nothing
dnl for the L case.
define(`ORL2I', `ifelse($1,I,orL2I)')
dnl
dnl BASE_SHIFT_INSN(mode, op node, insn, shift node, shift type)
dnl Emits a match rule for op(src1, shift(src2, src3)) using the
dnl shifted-register form of the ALU instruction. The shift amount is
dnl masked to the register width (0x1f for I, 0x3f for L).
define(`BASE_SHIFT_INSN',
`
instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst ($2$1 src1 ($4$1 src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "$3 $dst, $src1, $src2, $5 $src3" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
          as_Register($src1$$reg),
          as_Register($src2$$reg),
          Assembler::$5,
          $src3$$constant & ifelse($1,I,0x1f,0x3f));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}')dnl
dnl BASE_INVERTED_INSN(mode, op node, insn)
dnl Emits a match rule for op with one operand inverted via xor with
dnl all-ones (m1), mapping to bic/orn/eon with a zero LSL shift.
define(`BASE_INVERTED_INSN',
`
instruct $2$1_reg_not_reg(iReg$1NoSp dst,
                          iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_M1 m1,
                          rFlagsReg cr) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
  ifelse($2,Xor,
    match(Set dst (Xor$1 m1 (Xor$1 src2 src1)));,
    match(Set dst ($2$1 src1 (Xor$1 src2 m1)));)
  ins_cost(INSN_COST);
  format %{ "$3 $dst, $src1, $src2" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
          as_Register($src1$$reg),
          as_Register($src2$$reg),
          Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}')dnl
dnl INVERTED_SHIFT_INSN(mode, op node, insn, shift node, shift type)
dnl Like BASE_INVERTED_INSN but the inverted operand is itself a
dnl shifted register: op(src1, xor(shift(src2, src3), -1)).
define(`INVERTED_SHIFT_INSN',
`
instruct $2$1_reg_$4_not_reg(iReg$1NoSp dst,
                             iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
                             immI src3, imm$1_M1 src4, rFlagsReg cr) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
  ifelse($2,Xor,
    match(Set dst ($2$1 src4 (Xor$1($4$1 src2 src3) src1)));,
    match(Set dst ($2$1 src1 (Xor$1($4$1 src2 src3) src4)));)
  ins_cost(1.9 * INSN_COST);
  format %{ "$3 $dst, $src1, $src2, $5 $src3" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
          as_Register($src1$$reg),
          as_Register($src2$$reg),
          Assembler::$5,
          $src3$$constant & ifelse($1,I,0x1f,0x3f));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}')dnl
dnl NOT_INSN(mode, insn)
dnl Bitwise NOT, implemented as eon/eonw of src1 with the zero register.
define(`NOT_INSN',
`instruct reg$1_not_reg(iReg$1NoSp dst,
                        iReg$1`'ORL2I($1) src1, imm$1_M1 m1,
                        rFlagsReg cr) %{
  match(Set dst (Xor$1 src1 m1));
  ins_cost(INSN_COST);
  format %{ "$2 $dst, $src1, zr" %}

  ins_encode %{
    __ $2(as_Register($dst$$reg),
          as_Register($src1$$reg),
          zr,
          Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}')dnl
dnl
dnl BOTH_SHIFT_INSNS - instantiate a shifted-register rule for both the
dnl I (word, w-suffixed insn) and L (doubleword) modes; andr is special
dnl cased to andw for the word form.
define(`BOTH_SHIFT_INSNS',
`BASE_SHIFT_INSN(I, $1, ifelse($2,andr,andw,$2w), $3, $4)
BASE_SHIFT_INSN(L, $1, $2, $3, $4)')dnl
dnl
dnl BOTH_INVERTED_INSNS - instantiate an inverted-operand rule for both modes.
define(`BOTH_INVERTED_INSNS',
`BASE_INVERTED_INSN(I, $1, $2w, $3, $4)
BASE_INVERTED_INSN(L, $1, $2, $3, $4)')dnl
dnl
dnl BOTH_INVERTED_SHIFT_INSNS - instantiate an inverted shifted-register
dnl rule for both modes.
define(`BOTH_INVERTED_SHIFT_INSNS',
`INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4, ~0, int)
INVERTED_SHIFT_INSN(L, $1, $2, $3, $4, ~0l, long)')dnl
dnl
dnl ALL_SHIFT_KINDS - instantiate rules for all three shift kinds
dnl (LSR, ASR, LSL) in both modes.
define(`ALL_SHIFT_KINDS',
`BOTH_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
dnl
dnl ALL_INVERTED_SHIFT_KINDS - inverted-operand variant of the above.
define(`ALL_INVERTED_SHIFT_KINDS',
`BOTH_INVERTED_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
dnl
NOT_INSN(L, eon)
NOT_INSN(I, eonw)
BOTH_INVERTED_INSNS(And, bic)
BOTH_INVERTED_INSNS(Or, orn)
BOTH_INVERTED_INSNS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(And, bic)
ALL_INVERTED_SHIFT_KINDS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(Or, orn)
ALL_SHIFT_KINDS(And, andr)
ALL_SHIFT_KINDS(Xor, eor)
ALL_SHIFT_KINDS(Or, orr)
ALL_SHIFT_KINDS(Add, add)
ALL_SHIFT_KINDS(Sub, sub)
dnl
dnl EXTEND mode, rshift_op, src, lshift_count, rshift_count
define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)')
dnl BFM_INSN(mode, width mask, rshift node, insn)
dnl Matches a left shift followed by a right shift of the same value and
dnl emits a single bitfield-move instruction (sbfm/ubfm); r and s are the
dnl immr and imms bitfield operands of the instruction.
define(`BFM_INSN',`
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct $4$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift_count, immI rshift_count)
%{
  match(Set dst EXTEND($1, $3, src, lshift_count, rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "$4 $dst, $src, $rshift_count - $lshift_count, #$2 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & $2;
    int rshift = $rshift_count$$constant & $2;
    int s = $2 - lshift;
    int r = (rshift - lshift) & $2;
    __ $4(as_Register($dst$$reg),
          as_Register($src$$reg),
          r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}')
BFM_INSN(L, 63, RShift, sbfm)
BFM_INSN(I, 31, RShift, sbfmw)
BFM_INSN(L, 63, URShift, ubfm)
BFM_INSN(I, 31, URShift, ubfmw)
dnl
// Bitfield extract with shift & mask
dnl BFX_INSN(mode, rshift node, insn, width mask, type, log2 suffix)
dnl The predicate checks that shift plus extracted field width fits in
dnl the register, i.e. that a single ubfx/ubfxw can do the job.
define(`BFX_INSN',
`instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
%{
  match(Set dst (And$1 ($2$1 src rshift) mask));
  // Make sure we are not going to exceed what $3 can do.
  predicate((exact_log2$6(n->in(2)->get_$5() + 1) + (n->in(1)->in(2)->get_int() & $4)) <= ($4 + 1));

  ins_cost(INSN_COST);
  format %{ "$3 $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & $4;
    long mask = $mask$$constant;
    int width = exact_log2$6(mask+1);
    __ $3(as_Register($dst$$reg),
          as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}')
BFX_INSN(I, URShift, ubfxw, 31, int)
BFX_INSN(L, URShift, ubfx, 63, long, _long)

// We can use ubfx when extending an And with a mask when we know mask
// is positive. We know that because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

dnl UBFIZ_INSN(mode, insn, width mask, type, log2 suffix)
define(`UBFIZ_INSN',
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because imm$1_bitmask guarantees it.
`instruct $2$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, imm$1_bitmask mask)
%{
  match(Set dst (LShift$1 (And$1 src mask) lshift));
  predicate((exact_log2$5(n->in(1)->in(2)->get_$4() + 1) + (n->in(2)->get_int() & $3)) <= ($3 + 1));

  ins_cost(INSN_COST);
  format %{ "$2 $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & $3;
    long mask = $mask$$constant;
    int width = exact_log2$5(mask+1);
    __ $2(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}')
UBFIZ_INSN(I, ubfizw, 31, int)
UBFIZ_INSN(L, ubfiz, 63, long, _long)

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// Rotations

dnl EXTRACT_INSN(mode, width mask, op node, insn)
dnl Matches or/add of complementary left and right shifts of two
dnl registers and emits a single extr instruction; the predicate
dnl requires the two shift amounts to sum to the register width.
define(`EXTRACT_INSN',
`instruct extr$3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 (LShift$1 src1 lshift) (URShift$1 src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & $2) + (n->in(2)->in(2)->get_int() & $2)) & $2));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ $4(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
          $rshift$$constant & $2);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
')dnl
EXTRACT_INSN(L, 63, Or, extr)
EXTRACT_INSN(I, 31, Or, extrw)
EXTRACT_INSN(L, 63, Add, extr)
EXTRACT_INSN(I, 31, Add, extrw)
dnl ROL_EXPAND(mode, name, insn)
dnl Rotate-left expander: rotate left by s is a rotate right by the
dnl negated shift amount, hence the subw from zr into rscratch1.
define(`ROL_EXPAND', `
// $2 expander

instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "$2 $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ $3(as_Register($dst$$reg), as_Register($src$$reg),
          rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}')dnl
dnl ROR_EXPAND(mode, name, insn)
dnl Rotate-right expander: maps directly to rorv/rorvw.
define(`ROR_EXPAND', `
// $2 expander

instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "$2 $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ $3(as_Register($dst$$reg), as_Register($src$$reg),
          as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}')dnl
dnl ROL_INSN(mode, c suffix, name)
dnl Match rule for a variable rotate left expressed as or of opposing
dnl shifts; expands into the corresponding rol expander above.
define(ROL_INSN, `
instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
  match(Set dst (Or$1 (LShift$1
    src shift) (URShift$1 src (SubI c$2 shift))));

  expand %{
    $3$1_rReg(dst, src, shift, cr);
  %}
%}')dnl
dnl ROR_INSN(mode, c suffix, name)
dnl Mirror of ROL_INSN for a variable rotate right.
define(ROR_INSN, `
instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
  match(Set dst (Or$1 (URShift$1 src shift) (LShift$1 src (SubI c$2 shift))));

  expand %{
    $3$1_rReg(dst, src, shift, cr);
  %}
%}')dnl
ROL_EXPAND(L, rol, rorv)
ROL_EXPAND(I, rol, rorvw)
ROL_INSN(L, _64, rol)
ROL_INSN(L, 0, rol)
ROL_INSN(I, _32, rol)
ROL_INSN(I, 0, rol)
ROR_EXPAND(L, ror, rorv)
ROR_EXPAND(I, ror, rorvw)
ROR_INSN(L, _64, ror)
ROR_INSN(L, 0, ror)
ROR_INSN(I, _32, ror)
ROR_INSN(I, 0, ror)

// Add/subtract (extended)
dnl ADD_SUB_EXTENDED(mode, size, add node, shift node, insn, shift type, wordsize
dnl ADD_SUB_CONV(src mode, dst mode, op node, insn, ext type)
dnl Matches add/sub of a ConvI2L operand using the sign-extending
dnl register-extended form of the instruction.
define(`ADD_SUB_CONV', `
instruct $3Ext$1(iReg$2NoSp dst, iReg$2`'ORL2I($2) src1, iReg$1`'ORL2I($1) src2, rFlagsReg cr)
%{
  match(Set dst ($3$2 src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "$4 $dst, $src1, $src2, $5" %}

  ins_encode %{
    __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
          as_Register($src2$$reg), ext::$5);
  %}
  ins_pipe(ialu_reg_reg);
%}')dnl
ADD_SUB_CONV(I,L,Add,add,sxtw);
ADD_SUB_CONV(I,L,Sub,sub,sxtw);
dnl
dnl Matches add/sub where one operand is a shift-pair sign/zero extension
dnl (see EXTEND) and emits the register-extended instruction form.
define(`ADD_SUB_EXTENDED', `
instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI_`'eval($7-$2) lshift, immI_`'eval($7-$2) rshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
  ins_cost(INSN_COST);
  format %{ "$5 $dst, $src1, $src2, $6" %}

  ins_encode %{
    __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
          as_Register($src2$$reg), ext::$6);
  %}
  ins_pipe(ialu_reg_reg);
%}')
ADD_SUB_EXTENDED(I,16,Add,RShift,add,sxth,32)
ADD_SUB_EXTENDED(I,8,Add,RShift,add,sxtb,32)
ADD_SUB_EXTENDED(I,8,Add,URShift,add,uxtb,32)
ADD_SUB_EXTENDED(L,16,Add,RShift,add,sxth,64)
ADD_SUB_EXTENDED(L,32,Add,RShift,add,sxtw,64)
ADD_SUB_EXTENDED(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED(L,8,Add,URShift,add,uxtb,64)
dnl
dnl ADD_SUB_ZERO_EXTEND(mode, size, add node, insn, shift type)
dnl Matches add/sub where one operand is zero extended via an And with
dnl an all-ones mask of the given size (255/65535/4294967295) and emits
dnl the register-extended (uxtb/uxth/uxtw) instruction form.
define(`ADD_SUB_ZERO_EXTEND', `
instruct $3Ext$1_$5_and(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (And$1 src2 mask)));
  ins_cost(INSN_COST);
  format %{ "$4 $dst, $src1, $src2, $5" %}

  ins_encode %{
    __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
          as_Register($src2$$reg), ext::$5);
  %}
  ins_pipe(ialu_reg_reg);
%}')
dnl
ADD_SUB_ZERO_EXTEND(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Add,addw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Add,add,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Add,add,uxtw)
dnl
ADD_SUB_ZERO_EXTEND(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Sub,subw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Sub,sub,uxtw)
dnl
dnl ADD_SUB_EXTENDED_SHIFT(mode, size, add node, shift node, insn, ext type, wordsize)
dnl As ADD_SUB_EXTENDED but with the extended operand additionally
dnl shifted left by lshift2 (the extended-register shifted form).
define(`ADD_SUB_EXTENDED_SHIFT', `
instruct $3Ext$1_$6_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immIExt lshift2, immI_`'eval($7-$2) lshift1, immI_`'eval($7-$2) rshift1, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (LShift$1 EXTEND($1, $4, src2, lshift1, rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "$5 $dst, $src1, $src2, $6 #lshift2" %}

  ins_encode %{
    __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
          as_Register($src2$$reg), ext::$6, ($lshift2$$constant));
  %}
  ins_pipe(ialu_reg_reg_shift);
%}')
dnl                     $1 $2 $3   $4    $5   $6  $7
ADD_SUB_EXTENDED_SHIFT(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED_SHIFT(L,16,Add,RShift,add,sxth,64)
ADD_SUB_EXTENDED_SHIFT(L,32,Add,RShift,add,sxtw,64)
dnl
ADD_SUB_EXTENDED_SHIFT(L,8,Sub,RShift,sub,sxtb,64)
ADD_SUB_EXTENDED_SHIFT(L,16,Sub,RShift,sub,sxth,64)
ADD_SUB_EXTENDED_SHIFT(L,32,Sub,RShift,sub,sxtw,64)
dnl
ADD_SUB_EXTENDED_SHIFT(I,8,Add,RShift,addw,sxtb,32)
ADD_SUB_EXTENDED_SHIFT(I,16,Add,RShift,addw,sxth,32)
dnl
ADD_SUB_EXTENDED_SHIFT(I,8,Sub,RShift,subw,sxtb,32)
ADD_SUB_EXTENDED_SHIFT(I,16,Sub,RShift,subw,sxth,32)
dnl
dnl ADD_SUB_CONV_SHIFT(mode, add node, insn, ext type)
dnl Matches add/sub of a shifted ConvI2L operand and emits the
dnl sign-extending shifted extended-register instruction form.
define(`ADD_SUB_CONV_SHIFT', `
instruct $2ExtI_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst ($2$1 src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "$3 $dst, $src1, $src2, $4 #lshift" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg), as_Register($src1$$reg),
          as_Register($src2$$reg), ext::$4, ($lshift$$constant));
  %}
  ins_pipe(ialu_reg_reg_shift);
%}')
dnl
ADD_SUB_CONV_SHIFT(L,Add,add,sxtw);
ADD_SUB_CONV_SHIFT(L,Sub,sub,sxtw);
dnl
dnl ADD_SUB_ZERO_EXTEND_SHIFT(mode, size, add node, insn, ext type)
dnl As ADD_SUB_ZERO_EXTEND but with the masked operand additionally
dnl shifted left by lshift.
define(`ADD_SUB_ZERO_EXTEND_SHIFT', `
instruct $3Ext$1_$5_and_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (LShift$1 (And$1 src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "$4 $dst, $src1, $src2, $5 #lshift" %}

  ins_encode %{
    __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
          as_Register($src2$$reg), ext::$5, ($lshift$$constant));
  %}
  ins_pipe(ialu_reg_reg_shift);
%}')
dnl
dnl                        $1 $2   $3  $4  $5
ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Add,add,uxth)
ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Add,add,uxtw)
dnl
ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Sub,sub,uxtw)
dnl
dnl Word-sized (I mode) zero-extend-and-shift variants; no uxtw form is
dnl needed since a 32-bit mask of a 32-bit register is the identity.
ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Add,addw,uxth)
dnl
ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Sub,subw,uxth)
dnl
// END This section of the file is automatically generated. Do not edit --------------