{
    Copyright (c) 2000-2002 by Florian Klaempfl

    Code generation for add nodes on the ARM

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

 ****************************************************************************
}
unit narmadd;

{$i fpcdefs.inc}

interface

    uses
       node,ncgadd,cpubase;

    type
       tarmaddnode = class(tcgaddnode)
          private
             function GetResFlags(unsigned:Boolean):TResFlags;
             function GetFpuResFlags:TResFlags;
          public
             function use_fma : boolean;override;
             function pass_1 : tnode;override;
             function use_generic_mul32to64: boolean; override;
             function use_generic_mul64bit: boolean; override;
          protected
             function first_addfloat: tnode; override;
             procedure second_addordinal;override;
             procedure second_addfloat;override;
             procedure second_cmpfloat;override;
             procedure second_cmpordinal;override;
             procedure second_cmpsmallset;override;
             procedure second_cmp64bit;override;
             procedure second_add64bit;override;
       end;

  implementation

    uses
      globtype,verbose,globals,systems,
      constexp,symdef,symtable,symtype,symconst,
      aasmbase,aasmdata,aasmcpu,
      defutil,htypechk,cgbase,cgutils,
      cpuinfo,pass_1,pass_2,procinfo,
      ncon,nadd,ncnv,ncal,nmat,
      ncgutil,cgobj,cgcpu,
      hlcgobj
      ;

{*****************************************************************************
                               TARMAddNode
*****************************************************************************}

    function tarmaddnode.GetResFlags(unsigned:Boolean):TResFlags;
      begin
        case NodeType of
          equaln:
            GetResFlags:=F_EQ;
          unequaln:
            GetResFlags:=F_NE;
          else
            if not(unsigned) then
              begin
                if nf_swapped in flags then
                  case NodeType of
                    ltn:
                      GetResFlags:=F_GT;
                    lten:
                      GetResFlags:=F_GE;
                    gtn:
                      GetResFlags:=F_LT;
                    gten:
                      GetResFlags:=F_LE;
                    else
                      internalerror(201408203);
                  end
                else
                  case NodeType of
                    ltn:
                      GetResFlags:=F_LT;
                    lten:
                      GetResFlags:=F_LE;
                    gtn:
                      GetResFlags:=F_GT;
                    gten:
                      GetResFlags:=F_GE;
                    else
                      internalerror(201408204);
                  end;
              end
            else
              begin
                if nf_swapped in Flags then
                  case NodeType of
                    ltn:
                      GetResFlags:=F_HI;
                    lten:
                      GetResFlags:=F_CS;
                    gtn:
                      GetResFlags:=F_CC;
                    gten:
                      GetResFlags:=F_LS;
                    else
                      internalerror(201408205);
                  end
                else
                  case NodeType of
                    ltn:
                      GetResFlags:=F_CC;
                    lten:
                      GetResFlags:=F_LS;
                    gtn:
                      GetResFlags:=F_HI;
                    gten:
                      GetResFlags:=F_CS;
                    else
                      internalerror(201408206);
                  end;
              end;
        end;
      end;


    function tarmaddnode.GetFpuResFlags:TResFlags;
      begin
        if nf_swapped in Flags then
          internalerror(2014042001);
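        { The condition codes below assume that second_cmpfloat has copied the
          FPSCR NZCV flags into the APSR via VMRS. They are chosen so that an
          unordered compare (a NaN operand, flag pattern N=0 Z=0 C=1 V=1)
          makes every relation except unequaln evaluate to false:
          MI, LS, GT and GE all exclude that pattern. }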
        case NodeType of
          equaln:
            result:=F_EQ;
          unequaln:
            result:=F_NE;
          ltn:
            result:=F_MI;
          lten:
            result:=F_LS;
          gtn:
            result:=F_GT;
          gten:
            result:=F_GE;
          else
            internalerror(201408207);
        end;
      end;


    function tarmaddnode.use_fma : boolean;
      begin
        Result:=current_settings.fputype in [fpu_vfpv4];
      end;


    procedure tarmaddnode.second_addfloat;
      var
        op : TAsmOp;
        singleprec: boolean;
        pf: TOpPostfix;
      begin
        pass_left_right;
        if (nf_swapped in flags) then
          swapleftright;

        case current_settings.fputype of
          fpu_fpa,
          fpu_fpa10,
          fpu_fpa11:
            begin
              { force fpureg as location, left right doesn't matter
                as both will be in a fpureg }
              hlcg.location_force_fpureg(current_asmdata.CurrAsmList,left.location,left.resultdef,true);
              hlcg.location_force_fpureg(current_asmdata.CurrAsmList,right.location,right.resultdef,true);

              location_reset(location,LOC_FPUREGISTER,def_cgsize(resultdef));
              location.register:=cg.getfpuregister(current_asmdata.CurrAsmList,location.size);

              case nodetype of
                addn :
                  op:=A_ADF;
                muln :
                  op:=A_MUF;
                subn :
                  op:=A_SUF;
                slashn :
                  op:=A_DVF;
                else
                  internalerror(200308313);
              end;

              current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg_reg(op,
                 location.register,left.location.register,right.location.register),
                 cgsize2fpuoppostfix[def_cgsize(resultdef)]));
            end;
          fpu_vfpv2,
          fpu_vfpv3,
          fpu_vfpv4,
          fpu_vfpv3_d16:
            begin
              { force mmreg as location, left right doesn't matter
                as both will be in a mmreg }
              hlcg.location_force_mmregscalar(current_asmdata.CurrAsmList,left.location,left.resultdef,true);
              hlcg.location_force_mmregscalar(current_asmdata.CurrAsmList,right.location,right.resultdef,true);

              location_reset(location,LOC_MMREGISTER,def_cgsize(resultdef));
              location.register:=cg.getmmregister(current_asmdata.CurrAsmList,location.size);

              singleprec:=tfloatdef(left.resultdef).floattype=s32real;
              if singleprec then
                pf:=PF_F32
              else
                pf:=PF_F64;
              case nodetype of
                addn :
                  op:=A_VADD;
                muln :
                  op:=A_VMUL;
                subn :
                  op:=A_VSUB;
                slashn :
                  op:=A_VDIV;
                else
                  internalerror(2009111401);
              end;

              current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg_reg(op,
                 location.register,left.location.register,right.location.register),pf));
              cg.maybe_check_for_fpu_exception(current_asmdata.CurrAsmList);
            end;
          fpu_fpv4_s16:
            begin
              { force mmreg as location, left right doesn't matter
                as both will be in a mmreg }
              hlcg.location_force_mmregscalar(current_asmdata.CurrAsmList,left.location,left.resultdef,true);
              hlcg.location_force_mmregscalar(current_asmdata.CurrAsmList,right.location,right.resultdef,true);

              location_reset(location,LOC_MMREGISTER,def_cgsize(resultdef));
              location.register:=cg.getmmregister(current_asmdata.CurrAsmList,location.size);

              case nodetype of
                addn :
                  op:=A_VADD;
                muln :
                  op:=A_VMUL;
                subn :
                  op:=A_VSUB;
                slashn :
                  op:=A_VDIV;
                else
                  internalerror(2009111401);
              end;

              current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg_reg(op, location.register,left.location.register,right.location.register), PF_F32));
              cg.maybe_check_for_fpu_exception(current_asmdata.CurrAsmList);
            end;
          fpu_soft:
            { this case should be handled already by pass1 }
            internalerror(200308252);
          else
            internalerror(200308251);
        end;
      end;


    procedure tarmaddnode.second_cmpfloat;
      var
        op: TAsmOp;
        pf: TOpPostfix;
      begin
        pass_left_right;
        if (nf_swapped in flags) then
          swapleftright;

        location_reset(location,LOC_FLAGS,OS_NO);
        location.resflags:=getresflags(false);

        case current_settings.fputype of
          fpu_fpa,
          fpu_fpa10,
          fpu_fpa11:
            begin
              { force fpureg as location, left right doesn't matter
                as both will be in a fpureg }
              hlcg.location_force_fpureg(current_asmdata.CurrAsmList,left.location,left.resultdef,true);
              hlcg.location_force_fpureg(current_asmdata.CurrAsmList,right.location,right.resultdef,true);

              cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
              if nodetype in [equaln,unequaln] then
                current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg(A_CMF,
                   left.location.register,right.location.register),
                   cgsize2fpuoppostfix[def_cgsize(resultdef)]))
              else
                current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg(A_CMFE,
                   left.location.register,right.location.register),
                   cgsize2fpuoppostfix[def_cgsize(resultdef)]));
            end;
          fpu_vfpv2,
          fpu_vfpv3,
          fpu_vfpv4,
          fpu_vfpv3_d16:
            begin
              hlcg.location_force_mmregscalar(current_asmdata.CurrAsmList,left.location,left.resultdef,true);
              hlcg.location_force_mmregscalar(current_asmdata.CurrAsmList,right.location,right.resultdef,true);

              if nodetype in [equaln,unequaln] then
                op:=A_VCMP
              else
                op:=A_VCMPE;

              if (tfloatdef(left.resultdef).floattype=s32real) then
                pf:=PF_F32
              else
                pf:=PF_F64;

              current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg(op,
                 left.location.register,right.location.register), pf));
              cg.maybe_check_for_fpu_exception(current_asmdata.CurrAsmList);
              cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
              current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(A_VMRS,NR_APSR_nzcv,NR_FPSCR));
              location.resflags:=GetFpuResFlags;
            end;
          fpu_fpv4_s16:
            begin
              hlcg.location_force_mmregscalar(current_asmdata.CurrAsmList,left.location,left.resultdef,true);
              hlcg.location_force_mmregscalar(current_asmdata.CurrAsmList,right.location,right.resultdef,true);

              if nodetype in [equaln,unequaln] then
                op:=A_VCMP
              else
                op:=A_VCMPE;

              current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg(op,
                 left.location.register,right.location.register),PF_F32));
              cg.maybe_check_for_fpu_exception(current_asmdata.CurrAsmList);
              cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
              current_asmdata.CurrAsmList.Concat(taicpu.op_reg_reg(A_VMRS, NR_APSR_nzcv, NR_FPSCR));
            end;
          fpu_soft:
            { this case should be handled already by pass1 }
            internalerror(2009112404);
        end;
      end;


    procedure tarmaddnode.second_cmpsmallset;
      var
        tmpreg : tregister;
        b: byte;
      begin
        pass_left_right;

        location_reset(location,LOC_FLAGS,OS_NO);

        if (not(nf_swapped in flags) and
            (nodetype = lten)) or
           ((nf_swapped in flags) and
            (nodetype = gten)) then
          swapleftright;

        (* Try to keep right as a constant *)
        if (right.location.loc <> LOC_CONSTANT) or
           not(is_shifter_const(right.location.value, b)) or
           ((GenerateThumbCode) and not(is_thumb_imm(right.location.value))) then
          hlcg.location_force_reg(current_asmdata.CurrAsmList,right.location,right.resultdef,right.resultdef,true);
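        { left always ends up in a register; only right may be kept as a
          shifter constant by the check above }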
        hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,left.resultdef,true);

        case nodetype of
          equaln,
          unequaln:
            begin
              cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
              if right.location.loc = LOC_CONSTANT then
                current_asmdata.CurrAsmList.concat(taicpu.op_reg_const(A_CMP,left.location.register,right.location.value))
              else
                current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(A_CMP,left.location.register,right.location.register));
              if nodetype = equaln then
                location.resflags:=F_EQ
              else
                location.resflags:=F_NE;
            end;
          lten,
          gten:
            begin
              tmpreg:=cg.getintregister(current_asmdata.CurrAsmList,location.size);
              if right.location.loc = LOC_CONSTANT then
                begin
                  cg.a_op_const_reg_reg(current_asmdata.CurrAsmList,OP_AND,OS_32,right.location.value,left.location.register,tmpreg);
                  cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                  current_asmdata.CurrAsmList.concat(taicpu.op_reg_const(A_CMP,tmpreg,right.location.value));
                end
              else
                begin
                  cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList,OP_AND,OS_32,left.location.register,right.location.register,tmpreg);
                  cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                  current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(A_CMP,tmpreg,right.location.register));
                end;
              location.resflags:=F_EQ;
            end;
          else
            internalerror(2004012401);
        end;
      end;


    procedure tarmaddnode.second_cmp64bit;
      var
        unsigned : boolean;
        oldnodetype : tnodetype;
        dummyreg : tregister;
        truelabel, falselabel: tasmlabel;
        l: tasmlabel;
      const
        lt_zero_swapped: array[boolean] of tnodetype = (ltn, gtn);
      begin
        truelabel:=nil;
        falselabel:=nil;
        unsigned:=not(is_signed(left.resultdef)) or
                  not(is_signed(right.resultdef));

        pass_left_right;

        { pass_left_right moves possible consts to the right, the only
          remaining case with left consts (currency) can take this path too (KB) }
        if (right.nodetype=ordconstn) and
           (tordconstnode(right).value=0) and
           ((nodetype in [equaln,unequaln]) or
            (not(GenerateThumbCode) and is_signed(left.resultdef) and (nodetype = lt_zero_swapped[nf_swapped in Flags]))
           ) then
          begin
            location_reset(location,LOC_FLAGS,OS_NO);
            if not(left.location.loc in [LOC_CREGISTER,LOC_REGISTER]) then
              hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,left.resultdef,true);

            cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
            { Optimize for the common case of int64 < 0 }
            if nodetype in [ltn, gtn] then
              begin
                { Just check whether the MSB in reghi is set, this is independent of nf_swapped }
                location.resflags:=F_NE;
                current_asmdata.CurrAsmList.concat(taicpu.op_reg_const(A_TST,left.location.register64.reghi, aint($80000000)));
              end
            else
              begin
                location.resflags:=getresflags(unsigned);
                dummyreg:=cg.getintregister(current_asmdata.CurrAsmList,location.size);

                if GenerateThumbCode then
                  cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList,OP_OR,OS_32,left.location.register64.reglo,left.location.register64.reghi,dummyreg)
                else
                  current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg_reg(A_ORR,dummyreg,left.location.register64.reglo,left.location.register64.reghi),PF_S));
              end;
          end
        else
          begin
            hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,left.resultdef,true);
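            { generic case: both operands in register pairs; the high dwords
              are compared first and the low dwords only decide the result
              when the high dwords are equal }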
            hlcg.location_force_reg(current_asmdata.CurrAsmList,right.location,right.resultdef,right.resultdef,true);

            { operation requiring proper N, Z and C flags ? }
            if unsigned or (nodetype in [equaln,unequaln]) then
              begin
                location_reset(location,LOC_FLAGS,OS_NO);
                location.resflags:=getresflags(unsigned);
                cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(A_CMP,left.location.register64.reghi,right.location.register64.reghi));
                if GenerateThumbCode or GenerateThumb2Code then
                  begin
                    current_asmdata.getjumplabel(l);
                    cg.a_jmp_flags(current_asmdata.CurrAsmList,F_NE,l);
                    current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(A_CMP,left.location.register64.reglo,right.location.register64.reglo));
                    cg.a_label(current_asmdata.CurrAsmList,l);
                  end
                else
                  current_asmdata.CurrAsmList.concat(setcondition(taicpu.op_reg_reg(A_CMP,left.location.register64.reglo,right.location.register64.reglo),C_EQ));
              end
            else
            { operation requiring proper N, Z and V flags ? }
              begin
                current_asmdata.getjumplabel(truelabel);
                current_asmdata.getjumplabel(falselabel);
                location_reset_jump(location,truelabel,falselabel);
                cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(A_CMP,left.location.register64.reghi,right.location.register64.reghi));
                { the jump sequence is a little bit hairy }
                case nodetype of
                  ltn,gtn:
                    begin
                      cg.a_jmp_flags(current_asmdata.CurrAsmList,getresflags(false),location.truelabel);
                      { cheat a little bit for the negative test }
                      toggleflag(nf_swapped);
                      cg.a_jmp_flags(current_asmdata.CurrAsmList,getresflags(false),location.falselabel);
                      cg.a_reg_dealloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                      toggleflag(nf_swapped);
                    end;
                  lten,gten:
                    begin
                      oldnodetype:=nodetype;
                      if nodetype=lten then
                        nodetype:=ltn
                      else
                        nodetype:=gtn;
                      cg.a_jmp_flags(current_asmdata.CurrAsmList,getresflags(unsigned),location.truelabel);
                      { cheat for the negative test }
                      if nodetype=ltn then
                        nodetype:=gtn
                      else
                        nodetype:=ltn;
                      cg.a_jmp_flags(current_asmdata.CurrAsmList,getresflags(unsigned),location.falselabel);
                      cg.a_reg_dealloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                      nodetype:=oldnodetype;
                    end;
                end;
                cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(A_CMP,left.location.register64.reglo,right.location.register64.reglo));
                { the comparison of the low dwords always has to be
                  unsigned!
                }
                cg.a_jmp_flags(current_asmdata.CurrAsmList,getresflags(true),location.truelabel);
                cg.a_jmp_always(current_asmdata.CurrAsmList,location.falselabel);
                cg.a_reg_dealloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
              end;
          end;
      end;


    procedure tarmaddnode.second_add64bit;
      var
        asmList : TAsmList;
        ll,rl,res : TRegister64;
        tmpreg: TRegister;
      begin
        if (nodetype in [muln]) then
          begin
            asmList := current_asmdata.CurrAsmList;
            pass_left_right;
            force_reg_left_right(true, (left.location.loc<>LOC_CONSTANT) and (right.location.loc<>LOC_CONSTANT));
            set_result_location_reg;

            { shortcuts to register64s }
            ll:=left.location.register64;
            rl:=right.location.register64;
            res:=location.register64;

            { 64x64->64 multiplication: UMULL yields the full 64 bit product
              of the low dwords, and the two cross products ll.reglo*rl.reghi
              and rl.reglo*ll.reghi (which only affect the high dword) are
              added to the high half of the result }
            tmpreg := cg.getintregister(current_asmdata.CurrAsmList,OS_32);
            asmList.concat(taicpu.op_reg_reg_reg(A_MUL,tmpreg,ll.reglo,rl.reghi));
            asmList.concat(taicpu.op_reg_reg_reg_reg(A_UMULL,res.reglo,res.reghi,rl.reglo,ll.reglo));
            tbasecgarm(cg).safe_mla(asmList,tmpreg,rl.reglo,ll.reghi,tmpreg);
            asmList.concat(taicpu.op_reg_reg_reg(A_ADD,res.reghi,tmpreg,res.reghi));
          end
        else
          inherited second_add64bit;
      end;


    function tarmaddnode.pass_1 : tnode;
      var
        unsigned : boolean;
      begin
        result:=inherited pass_1;

        if not(assigned(result)) then
          begin
            unsigned:=not(is_signed(left.resultdef)) or
                      not(is_signed(right.resultdef));

            if is_64bit(left.resultdef) and
               ((nodetype in [equaln,unequaln]) or
                (unsigned and (nodetype in [ltn,lten,gtn,gten]))
               ) then
              expectloc:=LOC_FLAGS;
          end;
      end;


    function tarmaddnode.first_addfloat: tnode;
      var
        procname: string[31];
        { do we need to reverse the result ?
        }
        notnode : boolean;
        fdef : tdef;
      begin
        result := nil;
        notnode := false;

        if current_settings.fputype = fpu_fpv4_s16 then
          begin
            case tfloatdef(left.resultdef).floattype of
              s32real:
                begin
                  result:=nil;
                  notnode:=false;
                end;
              s64real:
                begin
                  fdef:=search_system_type('FLOAT64').typedef;
                  procname:='float64';

                  case nodetype of
                    addn:
                      procname:=procname+'_add';
                    muln:
                      procname:=procname+'_mul';
                    subn:
                      procname:=procname+'_sub';
                    slashn:
                      procname:=procname+'_div';
                    ltn:
                      procname:=procname+'_lt';
                    lten:
                      procname:=procname+'_le';
                    gtn:
                      begin
                        procname:=procname+'_lt';
                        swapleftright;
                      end;
                    gten:
                      begin
                        procname:=procname+'_le';
                        swapleftright;
                      end;
                    equaln:
                      procname:=procname+'_eq';
                    unequaln:
                      begin
                        procname:=procname+'_eq';
                        notnode:=true;
                      end;
                    else
                      CGMessage3(type_e_operator_not_supported_for_types,node2opstr(nodetype),left.resultdef.typename,right.resultdef.typename);
                  end;

                  if nodetype in [ltn,lten,gtn,gten,equaln,unequaln] then
                    resultdef:=pasbool1type;
                  result:=ctypeconvnode.create_internal(ccallnode.createintern(procname,ccallparanode.create(
                      ctypeconvnode.create_internal(right,fdef),
                    ccallparanode.create(
                      ctypeconvnode.create_internal(left,fdef),nil))),resultdef);

                  left:=nil;
                  right:=nil;

                  { do we need to reverse the result }
                  if notnode then
                    result:=cnotnode.create(result);
                end;
            end;
          end
        else
          result:=inherited first_addfloat;
      end;


    procedure tarmaddnode.second_cmpordinal;
      var
        unsigned : boolean;
        tmpreg : tregister;
        b : byte;
      begin
        pass_left_right;
        force_reg_left_right(true,true);

        unsigned:=not(is_signed(left.resultdef)) or
                  not(is_signed(right.resultdef));
        cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
        if right.location.loc = LOC_CONSTANT then
          begin
            if (not(GenerateThumbCode) and is_shifter_const(right.location.value,b)) or
               ((GenerateThumbCode) and is_thumb_imm(right.location.value)) then
              current_asmdata.CurrAsmList.concat(taicpu.op_reg_const(A_CMP,left.location.register,right.location.value))
            else
              begin
                tmpreg:=cg.getintregister(current_asmdata.CurrAsmList,location.size);
                cg.a_load_const_reg(current_asmdata.CurrAsmList,OS_INT,
                  right.location.value,tmpreg);
                current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(A_CMP,left.location.register,tmpreg));
              end;
          end
        else
          current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(A_CMP,left.location.register,right.location.register));

        location_reset(location,LOC_FLAGS,OS_NO);
        location.resflags:=getresflags(unsigned);
      end;

    const
      multops: array[boolean] of TAsmOp = (A_SMULL, A_UMULL);

    procedure tarmaddnode.second_addordinal;
      var
        unsigned: boolean;
      begin
        if (nodetype=muln) and
           is_64bit(resultdef) and
           not(GenerateThumbCode) and
           (CPUARM_HAS_UMULL in cpu_capabilities[current_settings.cputype]) then
          begin
            pass_left_right;
            force_reg_left_right(true, false);
            set_result_location_reg;
            unsigned:=not(is_signed(left.resultdef)) or
                      not(is_signed(right.resultdef));
            current_asmdata.CurrAsmList.Concat(
              taicpu.op_reg_reg_reg_reg(multops[unsigned], location.register64.reglo, location.register64.reghi,
                left.location.register,right.location.register));
          end
        else
          inherited
            second_addordinal;
      end;


    function tarmaddnode.use_generic_mul32to64: boolean;
      begin
        result:=GenerateThumbCode or not(CPUARM_HAS_UMULL in cpu_capabilities[current_settings.cputype]);
      end;


    function tarmaddnode.use_generic_mul64bit: boolean;
      begin
        result:=GenerateThumbCode or
          not(CPUARM_HAS_UMULL in cpu_capabilities[current_settings.cputype]) or
          (cs_check_overflow in current_settings.localswitches);
      end;

begin
  caddnode:=tarmaddnode;
end.