/* Definitions of x86 tunable features.
   Copyright (C) 2013-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* Tuning for a given CPU XXXX consists of:
    - adding new CPU into:
	  - adding PROCESSOR_XXX to processor_type (in i386.h)
	  - possibly adding XXX into CPU attribute in i386.md
	  - adding XXX to processor_alias_table (in i386.c)
    - introducing ix86_XXX_cost in i386.c
	  - Stringop generation table can be built based on the test_stringop
	    script (once the rest of tuning is complete)
    - designing a scheduler model in
	  - XXXX.md file
	  - Updating ix86_issue_rate and ix86_adjust_cost in i386.md
	  - possibly updating ia32_multipass_dfa_lookahead, ix86_sched_reorder
	    and ix86_sched_init_global if those tricks are needed.
    - Tuning the flags below.  Those are split into sections and each
      section is very roughly ordered by importance.  */

/*****************************************************************************/
/* Scheduling flags. */
/*****************************************************************************/

/* X86_TUNE_SCHEDULE: Enable scheduling.  */
DEF_TUNE (X86_TUNE_SCHEDULE, "schedule",
	  m_PENT | m_LAKEMONT | m_PPRO | m_CORE_ALL | m_BONNELL | m_SILVERMONT
	  | m_INTEL | m_KNL | m_KNM | m_K6_GEODE | m_AMD_MULTIPLE | m_GOLDMONT
	  | m_GOLDMONT_PLUS | m_TREMONT | m_GENERIC)

/* X86_TUNE_PARTIAL_REG_DEPENDENCY: Enable more register renaming
   on modern chips.  Prefer stores affecting the whole integer register
   over partial stores.  For example, prefer MOVZBL or MOVQ to load an 8bit
   value over MOVB.  */
DEF_TUNE (X86_TUNE_PARTIAL_REG_DEPENDENCY, "partial_reg_dependency",
	  m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2
	  | m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL
	  | m_KNL | m_KNM | m_AMD_MULTIPLE | m_TREMONT
	  | m_GENERIC)

/* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: This knob promotes all store
   destinations to be 128bit to allow register renaming on 128bit SSE units,
   but usually results in one extra microop on 64bit SSE units.
   Experimental results show that disabling this option on P4 brings over 20%
   SPECfp regression, while enabling it on K8 brings roughly 2.4% regression
   that can be partly masked by careful scheduling of moves.  */
DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY, "sse_partial_reg_dependency",
	  m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_AMDFAM10
	  | m_BDVER | m_ZNVER | m_GENERIC)
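/* Illustrative sketch, not a tuning entry: under
   X86_TUNE_PARTIAL_REG_DEPENDENCY a byte load is emitted as

       movzbl	(%rsi), %eax	# writes whole %eax, no false dependency

   rather than "movb (%rsi), %al", which would merge into the previous
   value of %eax.  Similarly, X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY typically
   clears the destination before a scalar conversion, e.g.

       pxor	%xmm0, %xmm0	# break dependency on the old %xmm0
       cvtsi2ss	%eax, %xmm0	# writes only the low 32 bits

   so the conversion does not depend on the stale upper bits.  */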
/* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
   are resolved on SSE register parts instead of whole registers, so we may
   maintain just the lower part of scalar values in proper format, leaving the
   upper part undefined.  */
DEF_TUNE (X86_TUNE_SSE_SPLIT_REGS, "sse_split_regs", m_ATHLON_K8)

/* X86_TUNE_PARTIAL_FLAG_REG_STALL: this flag disables use of flags
   set by instructions affecting just some flags (in particular shifts).
   This is because Core2 resolves dependencies on the whole flags register
   and such sequences introduce a false dependency on the previous instruction
   setting full flags.

   The flag does not affect generation of INC and DEC, which is controlled
   by X86_TUNE_USE_INCDEC.  */
DEF_TUNE (X86_TUNE_PARTIAL_FLAG_REG_STALL, "partial_flag_reg_stall",
	  m_CORE2)

/* X86_TUNE_MOVX: Enable zero extending of integer registers to avoid
   partial dependencies.  */
DEF_TUNE (X86_TUNE_MOVX, "movx",
	  m_PPRO | m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE
	  | m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_KNL | m_KNM | m_INTEL
	  | m_GOLDMONT_PLUS | m_GEODE | m_AMD_MULTIPLE
	  | m_CORE_AVX2 | m_TREMONT | m_GENERIC)

/* X86_TUNE_MEMORY_MISMATCH_STALL: Avoid partial stores that are followed by
   full sized loads.  */
DEF_TUNE (X86_TUNE_MEMORY_MISMATCH_STALL, "memory_mismatch_stall",
	  m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_INTEL
	  | m_KNL | m_KNM | m_GOLDMONT | m_GOLDMONT_PLUS | m_AMD_MULTIPLE
	  | m_TREMONT | m_GENERIC)

/* X86_TUNE_FUSE_CMP_AND_BRANCH_32: Fuse compare with a subsequent
   conditional jump instruction for 32bit targets.  */
DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_32, "fuse_cmp_and_branch_32",
	  m_CORE_ALL | m_BDVER | m_ZNVER | m_GENERIC)

/* X86_TUNE_FUSE_CMP_AND_BRANCH_64: Fuse compare with a subsequent
   conditional jump instruction for TARGET_64BIT.  */
DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_64, "fuse_cmp_and_branch_64",
	  m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_BDVER
	  | m_ZNVER | m_GENERIC)

/* X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS: Fuse compare with a
   subsequent conditional jump instruction when the conditional jump
   checks the sign flag (SF) or overflow flag (OF).  */
DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS, "fuse_cmp_and_branch_soflags",
	  m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_BDVER
	  | m_ZNVER | m_GENERIC)

/* X86_TUNE_FUSE_ALU_AND_BRANCH: Fuse ALU with a subsequent conditional
   jump instruction when the ALU instruction produces the CCFLAG consumed by
   the conditional jump instruction.  */
DEF_TUNE (X86_TUNE_FUSE_ALU_AND_BRANCH, "fuse_alu_and_branch",
	  m_SANDYBRIDGE | m_CORE_AVX2 | m_GENERIC)


/*****************************************************************************/
/* Function prologue, epilogue and function calling sequences. */
/*****************************************************************************/

/* X86_TUNE_ACCUMULATE_OUTGOING_ARGS: Allocate stack space for outgoing
   arguments in prologue/epilogue instead of separately for each call
   by push/pop instructions.
   This increases code size by about 5% in 32bit mode, less so in 64bit mode
   because parameters are passed in registers.  It is a considerable
   win for targets without a stack engine, where multiple push operations
   cannot execute in parallel.  */
DEF_TUNE (X86_TUNE_ACCUMULATE_OUTGOING_ARGS, "accumulate_outgoing_args",
	  m_PPRO | m_P4_NOCONA | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM
	  | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ATHLON_K8)
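/* Illustrative sketch, not a tuning entry: with accumulate_outgoing_args
   the argument area is carved out once in the prologue and arguments are
   stored with plain moves,

       subl	$16, %esp	# once, in the prologue
       ...
       movl	%eax, (%esp)	# store argument for a call
       call	foo

   instead of wrapping every call in "pushl %eax; call foo; addl $4, %esp".  */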
/* X86_TUNE_PROLOGUE_USING_MOVE: Do not use push/pop in prologues that are
   considered on critical path.  */
DEF_TUNE (X86_TUNE_PROLOGUE_USING_MOVE, "prologue_using_move",
	  m_PPRO | m_ATHLON_K8)

/* X86_TUNE_EPILOGUE_USING_MOVE: Do not use push/pop in epilogues that are
   considered on critical path.  */
DEF_TUNE (X86_TUNE_EPILOGUE_USING_MOVE, "epilogue_using_move",
	  m_PPRO | m_ATHLON_K8)

/* X86_TUNE_USE_LEAVE: Use "leave" instruction in epilogues where it fits.  */
DEF_TUNE (X86_TUNE_USE_LEAVE, "use_leave",
	  m_386 | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC)

/* X86_TUNE_PUSH_MEMORY: Enable generation of "push mem" instructions.
   Some chips, like the 486 and Pentium, work faster with separate load
   and push instructions.  */
DEF_TUNE (X86_TUNE_PUSH_MEMORY, "push_memory",
	  m_386 | m_P4_NOCONA | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE
	  | m_GENERIC)

/* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
   over esp subtraction.  */
DEF_TUNE (X86_TUNE_SINGLE_PUSH, "single_push", m_386 | m_486 | m_PENT
	  | m_LAKEMONT | m_K6_GEODE)

/* X86_TUNE_DOUBLE_PUSH: Enable if double push insn is preferred
   over esp subtraction.  */
DEF_TUNE (X86_TUNE_DOUBLE_PUSH, "double_push", m_PENT | m_LAKEMONT
	  | m_K6_GEODE)

/* X86_TUNE_SINGLE_POP: Enable if single pop insn is preferred
   over esp addition.  */
DEF_TUNE (X86_TUNE_SINGLE_POP, "single_pop", m_386 | m_486 | m_PENT
	  | m_LAKEMONT | m_PPRO)

/* X86_TUNE_DOUBLE_POP: Enable if double pop insn is preferred
   over esp addition.  */
DEF_TUNE (X86_TUNE_DOUBLE_POP, "double_pop", m_PENT | m_LAKEMONT)

/*****************************************************************************/
/* Branch predictor tuning */
/*****************************************************************************/

/* X86_TUNE_PAD_SHORT_FUNCTION: Make every function at least 4
   instructions long.  */
DEF_TUNE (X86_TUNE_PAD_SHORT_FUNCTION, "pad_short_function", m_BONNELL)

/* X86_TUNE_PAD_RETURNS: Place NOP before every RET that is a destination
   of conditional jump or directly preceded by other jump instruction.
   This is important on AMD K8-AMDFAM10 because the branch prediction
   architecture expects at most one jump per 2 byte window.  Failing to
   pad returns leads to misaligned return stack.  */
DEF_TUNE (X86_TUNE_PAD_RETURNS, "pad_returns",
	  m_ATHLON_K8 | m_AMDFAM10)

/* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
   than 4 branch instructions in the 16 byte window.  */
DEF_TUNE (X86_TUNE_FOUR_JUMP_LIMIT, "four_jump_limit",
	  m_PPRO | m_P4_NOCONA | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM
	  | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_INTEL | m_ATHLON_K8
	  | m_AMDFAM10)

/*****************************************************************************/
/* Integer instruction selection tuning */
/*****************************************************************************/

/* X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL: Enable software prefetching
   at -O3.  For the moment, the prefetching seems badly tuned for Intel
   chips.  */
DEF_TUNE (X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL, "software_prefetching_beneficial",
	  m_K6_GEODE | m_ATHLON_K8 | m_AMDFAM10 | m_BDVER | m_BTVER)
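/* Illustrative sketch, not a tuning entry: on targets with
   software_prefetching_beneficial set, -O3 may insert prefetch
   instructions ahead of streaming loads in a loop, roughly

       .L1:
	       prefetcht0	512(%eax)	# fetch a future cache line
	       addl	(%eax), %ebx
	       addl	$4, %eax
	       cmpl	%edx, %eax
	       jne	.L1

   The distance shown (512 bytes) is made up; GCC computes the actual
   prefetch distance from the target cost model.  */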
/* X86_TUNE_LCP_STALL: Avoid an expensive length-changing prefix stall
   on 16-bit immediate moves into memory on Core2 and Core i7.  */
DEF_TUNE (X86_TUNE_LCP_STALL, "lcp_stall", m_CORE_ALL | m_GENERIC)

/* X86_TUNE_READ_MODIFY: Enable use of read-modify instructions such
   as "add mem, reg".  */
DEF_TUNE (X86_TUNE_READ_MODIFY, "read_modify", ~(m_PENT | m_LAKEMONT | m_PPRO))

/* X86_TUNE_USE_INCDEC: Enable use of inc/dec instructions.

   Core2 and Nehalem have a stall of 7 cycles for partial flag register stalls.
   Sandy Bridge and Ivy Bridge generate an extra uop.  On Haswell this extra
   uop is output only when the value really needs to be merged, which is not
   done by GCC generated code.  */
DEF_TUNE (X86_TUNE_USE_INCDEC, "use_incdec",
	  ~(m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE
	    | m_BONNELL | m_SILVERMONT | m_INTEL | m_KNL | m_KNM | m_GOLDMONT
	    | m_GOLDMONT_PLUS | m_TREMONT | m_GENERIC))

/* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
   for DFmode copies.  */
DEF_TUNE (X86_TUNE_INTEGER_DFMODE_MOVES, "integer_dfmode_moves",
	  ~(m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
	    | m_KNL | m_KNM | m_INTEL | m_GEODE | m_AMD_MULTIPLE | m_GOLDMONT
	    | m_GOLDMONT_PLUS | m_TREMONT | m_GENERIC))

/* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit.  This flag
   will impact LEA instruction selection.  */
DEF_TUNE (X86_TUNE_OPT_AGU, "opt_agu", m_BONNELL | m_SILVERMONT | m_KNL
	  | m_KNM | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_INTEL)

/* X86_TUNE_AVOID_LEA_FOR_ADDR: Avoid lea for address computation.  */
DEF_TUNE (X86_TUNE_AVOID_LEA_FOR_ADDR, "avoid_lea_for_addr",
	  m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT
	  | m_KNL | m_KNM)

/* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
   vector path on AMD machines.
   FIXME: Do we need to enable this for core?  */
DEF_TUNE (X86_TUNE_SLOW_IMUL_IMM32_MEM, "slow_imul_imm32_mem",
	  m_K8 | m_AMDFAM10)

/* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
   machines.
   FIXME: Do we need to enable this for core?  */
DEF_TUNE (X86_TUNE_SLOW_IMUL_IMM8, "slow_imul_imm8",
	  m_K8 | m_AMDFAM10)

/* X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE: Try to avoid memory operands for
   a conditional move.  */
DEF_TUNE (X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE, "avoid_mem_opnd_for_cmove",
	  m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_KNL
	  | m_KNM | m_TREMONT | m_INTEL)

/* X86_TUNE_SINGLE_STRINGOP: Enable use of single string operations, such
   as MOVS and STOS (without a REP prefix) to move/set sequences of bytes.  */
DEF_TUNE (X86_TUNE_SINGLE_STRINGOP, "single_stringop", m_386 | m_P4_NOCONA)

/* X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB: Enable use of REP MOVSB/STOSB to
   move/set sequences of bytes with known size.  */
DEF_TUNE (X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB,
	  "prefer_known_rep_movsb_stosb",
	  m_SKYLAKE | m_ALDERLAKE | m_CORE_AVX512)

/* X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES: Enable generation of
   compact prologues and epilogues by issuing misaligned moves.  This
   requires the target to handle misaligned moves and partial memory stalls
   reasonably well.
   FIXME: This may actually be a win on more targets than listed here.  */
DEF_TUNE (X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES,
	  "misaligned_move_string_pro_epilogues",
	  m_386 | m_486 | m_CORE_ALL | m_AMD_MULTIPLE | m_GENERIC)
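/* Illustrative sketch, not a tuning entry: with
   X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB a fixed-size memcpy can expand to

       movl	$64, %ecx	# known byte count
       rep movsb		# copy (%rsi) -> (%rdi)

   relying on the fast short REP MOVSB implementation of these cores
   instead of an unrolled sequence of wider loads and stores.  */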
/* X86_TUNE_USE_SAHF: Controls use of SAHF.  */
DEF_TUNE (X86_TUNE_USE_SAHF, "use_sahf",
	  m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
	  | m_KNL | m_KNM | m_INTEL | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER
	  | m_BTVER | m_ZNVER | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT
	  | m_GENERIC)

/* X86_TUNE_USE_CLTD: Controls use of CLTD and CQTO instructions.  */
DEF_TUNE (X86_TUNE_USE_CLTD, "use_cltd",
	  ~(m_PENT | m_LAKEMONT | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM
	    | m_INTEL | m_K6 | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT))

/* X86_TUNE_USE_BT: Enable use of BT (bit test) instructions.  */
DEF_TUNE (X86_TUNE_USE_BT, "use_bt",
	  m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_INTEL
	  | m_LAKEMONT | m_AMD_MULTIPLE | m_GOLDMONT | m_GOLDMONT_PLUS
	  | m_TREMONT | m_GENERIC)

/* X86_TUNE_AVOID_FALSE_DEP_FOR_BMI: Avoid false dependency
   for bit-manipulation instructions.  */
DEF_TUNE (X86_TUNE_AVOID_FALSE_DEP_FOR_BMI, "avoid_false_dep_for_bmi",
	  m_SANDYBRIDGE | m_CORE_AVX2 | m_GENERIC)

/* X86_TUNE_ADJUST_UNROLL: This enables adjusting the unroll factor based
   on hardware capabilities.  Bdver3 hardware has a loop buffer which makes
   unrolling small loops less important.  For such architectures we adjust
   the unroll factor so that the unrolled loop fits the loop buffer.  */
DEF_TUNE (X86_TUNE_ADJUST_UNROLL, "adjust_unroll_factor", m_BDVER3 | m_BDVER4)

/* X86_TUNE_ONE_IF_CONV_INSN: Restrict the number of cmov insns in
   an if-converted sequence to one.  */
DEF_TUNE (X86_TUNE_ONE_IF_CONV_INSN, "one_if_conv_insn",
	  m_SILVERMONT | m_KNL | m_KNM | m_INTEL | m_CORE_ALL | m_GOLDMONT
	  | m_GOLDMONT_PLUS | m_TREMONT | m_GENERIC)

/* X86_TUNE_AVOID_MFENCE: Use lock prefixed instructions instead of mfence.  */
DEF_TUNE (X86_TUNE_AVOID_MFENCE, "avoid_mfence",
	  m_CORE_ALL | m_BDVER | m_ZNVER | m_GENERIC)

/* X86_TUNE_EXPAND_ABS: This enables a new abs pattern by
   generating instructions for abs (x) = (((signed) x >> (W-1)) ^ x) -
   ((signed) x >> (W-1)) instead of cmove or SSE max/abs instructions.  */
DEF_TUNE (X86_TUNE_EXPAND_ABS, "expand_abs",
	  m_CORE_ALL | m_SILVERMONT | m_KNL | m_KNM | m_GOLDMONT
	  | m_GOLDMONT_PLUS | m_TREMONT)
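/* Worked example, not a tuning entry: for W = 32 the expand_abs formula
   yields the classic branchless sequence

       movl	%eax, %edx
       sarl	$31, %edx	# %edx = x >> 31, i.e. 0 or -1
       xorl	%edx, %eax	# conditional one's complement
       subl	%edx, %eax	# fix up: adds 1 when x was negative

   since for negative x, (x ^ -1) - (-1) = ~x + 1 = -x, and for
   non-negative x the sequence is the identity.  */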
/*****************************************************************************/
/* 387 instruction selection tuning */
/*****************************************************************************/

/* X86_TUNE_USE_HIMODE_FIOP: Enables use of x87 instructions with 16bit
   integer operand.
   FIXME: Why is this disabled for modern chips?  */
DEF_TUNE (X86_TUNE_USE_HIMODE_FIOP, "use_himode_fiop",
	  m_386 | m_486 | m_K6_GEODE)

/* X86_TUNE_USE_SIMODE_FIOP: Enables use of x87 instructions with 32bit
   integer operand.  */
DEF_TUNE (X86_TUNE_USE_SIMODE_FIOP, "use_simode_fiop",
	  ~(m_PENT | m_LAKEMONT | m_PPRO | m_CORE_ALL | m_BONNELL
	    | m_SILVERMONT | m_KNL | m_KNM | m_INTEL | m_AMD_MULTIPLE
	    | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_GENERIC))

/* X86_TUNE_USE_FFREEP: Use ffreep instruction instead of fstp.  */
DEF_TUNE (X86_TUNE_USE_FFREEP, "use_ffreep", m_AMD_MULTIPLE)

/* X86_TUNE_EXT_80387_CONSTANTS: Use fancy 80387 constants, such as PI.  */
DEF_TUNE (X86_TUNE_EXT_80387_CONSTANTS, "ext_80387_constants",
	  m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
	  | m_KNL | m_KNM | m_INTEL | m_K6_GEODE | m_ATHLON_K8 | m_GOLDMONT
	  | m_GOLDMONT_PLUS | m_TREMONT | m_GENERIC)

/*****************************************************************************/
/* SSE instruction selection tuning */
/*****************************************************************************/

/* X86_TUNE_GENERAL_REGS_SSE_SPILL: Try to spill general regs to SSE
   regs instead of memory.  */
DEF_TUNE (X86_TUNE_GENERAL_REGS_SSE_SPILL, "general_regs_sse_spill",
	  m_CORE_ALL)

/* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL: Use movups for misaligned loads instead
   of a sequence loading registers by parts.  */
DEF_TUNE (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL, "sse_unaligned_load_optimal",
	  m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_SILVERMONT | m_KNL | m_KNM
	  | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS
	  | m_TREMONT | m_AMDFAM10 | m_BDVER | m_BTVER | m_ZNVER | m_GENERIC)

/* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL: Use movups for misaligned stores
   instead of a sequence storing registers by parts.  */
DEF_TUNE (X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL, "sse_unaligned_store_optimal",
	  m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_SILVERMONT | m_KNL | m_KNM
	  | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS
	  | m_TREMONT | m_BDVER | m_ZNVER | m_GENERIC)

/* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL: Use packed single
   precision 128bit instructions instead of double where possible.  */
DEF_TUNE (X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL, "sse_packed_single_insn_optimal",
	  m_BDVER | m_ZNVER)

/* X86_TUNE_SSE_TYPELESS_STORES: Always use movaps/movups for 128bit stores.  */
DEF_TUNE (X86_TUNE_SSE_TYPELESS_STORES, "sse_typeless_stores",
	  m_AMD_MULTIPLE | m_CORE_ALL | m_GENERIC)

/* X86_TUNE_SSE_LOAD0_BY_PXOR: Always use pxor to load0 as opposed to
   xorps/xorpd and other variants.  */
DEF_TUNE (X86_TUNE_SSE_LOAD0_BY_PXOR, "sse_load0_by_pxor",
	  m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BDVER | m_BTVER | m_ZNVER
	  | m_GENERIC)

/* X86_TUNE_INTER_UNIT_MOVES_TO_VEC: Enable moves from integer
   to SSE registers.  If disabled, the moves will be done by storing
   the value to memory and reloading.
   Enable this flag for generic - the only relevant architecture preferring
   no inter-unit moves is Bulldozer.  While this causes a small regression on
   SPECfp scores (sub 0.3%), disabling inter-unit moves noticeably penalizes
   hand-written vectorized code that uses e.g. _mm_set_epi16.  */
DEF_TUNE (X86_TUNE_INTER_UNIT_MOVES_TO_VEC, "inter_unit_moves_to_vec",
	  ~(m_ATHLON_K8 | m_AMDFAM10 | m_BDVER | m_BTVER))

/* X86_TUNE_INTER_UNIT_MOVES_FROM_VEC: Enable moves from SSE
   to integer registers.  If disabled, the moves will be done by storing
   the value to memory and reloading.  */
DEF_TUNE (X86_TUNE_INTER_UNIT_MOVES_FROM_VEC, "inter_unit_moves_from_vec",
	  ~m_ATHLON_K8)

/* X86_TUNE_INTER_UNIT_CONVERSIONS: Enable float<->integer conversions
   to use both SSE and integer registers at the same time.  */
DEF_TUNE (X86_TUNE_INTER_UNIT_CONVERSIONS, "inter_unit_conversions",
	  ~(m_AMDFAM10 | m_BDVER))
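/* Illustrative sketch, not a tuning entry: with inter_unit_moves_to_vec a
   GPR-to-SSE transfer is a single instruction,

       movd	%eax, %xmm0	# direct integer -> SSE move

   while with the flag disabled the same transfer goes through memory:

       movl	%eax, -4(%rsp)
       movd	-4(%rsp), %xmm0

   The memory round trip is longer but avoids the slow inter-unit path on
   the affected AMD cores.  */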
/* X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS: Try to split memory operand for
   fp converts to destination register.  */
DEF_TUNE (X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS, "split_mem_opnd_for_fp_converts",
	  m_SILVERMONT | m_KNL | m_KNM | m_GOLDMONT | m_GOLDMONT_PLUS
	  | m_TREMONT | m_INTEL)

/* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
   from FP to FP.  This form of instructions avoids partial write to the
   destination.  */
DEF_TUNE (X86_TUNE_USE_VECTOR_FP_CONVERTS, "use_vector_fp_converts",
	  m_AMDFAM10)

/* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
   from integer to FP.  */
DEF_TUNE (X86_TUNE_USE_VECTOR_CONVERTS, "use_vector_converts", m_AMDFAM10)

/* X86_TUNE_SLOW_PSHUFB: Indicates tunings with slow pshufb instruction.  */
DEF_TUNE (X86_TUNE_SLOW_PSHUFB, "slow_pshufb",
	  m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_GOLDMONT
	  | m_GOLDMONT_PLUS | m_TREMONT | m_INTEL)

/* X86_TUNE_AVOID_4BYTE_PREFIXES: Avoid instructions requiring 4+ bytes of prefixes.  */
DEF_TUNE (X86_TUNE_AVOID_4BYTE_PREFIXES, "avoid_4byte_prefixes",
	  m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_INTEL)

/* X86_TUNE_USE_GATHER: Use gather instructions.  */
DEF_TUNE (X86_TUNE_USE_GATHER, "use_gather",
	  ~(m_ZNVER1 | m_ZNVER2 | m_GENERIC))

/* X86_TUNE_AVOID_128FMA_CHAINS: Avoid creating loops with tight 128bit or
   smaller FMA chain.  */
DEF_TUNE (X86_TUNE_AVOID_128FMA_CHAINS, "avoid_fma_chains", m_ZNVER)

/* X86_TUNE_AVOID_256FMA_CHAINS: Avoid creating loops with tight 256bit or
   smaller FMA chain.  */
DEF_TUNE (X86_TUNE_AVOID_256FMA_CHAINS, "avoid_fma256_chains",
	  m_ZNVER2 | m_ZNVER3)

/*****************************************************************************/
/* AVX instruction selection tuning (some of the SSE flags affect AVX, too) */
/*****************************************************************************/

/* X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL: if false, unaligned loads are
   split.  */
DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL, "256_unaligned_load_optimal",
	  ~(m_NEHALEM | m_SANDYBRIDGE))

/* X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL: if false, unaligned stores are
   split.  */
DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL, "256_unaligned_store_optimal",
	  ~(m_NEHALEM | m_SANDYBRIDGE | m_BDVER | m_ZNVER1))

/* X86_TUNE_AVX256_SPLIT_REGS: if true, AVX256 ops are split into two AVX128 ops.  */
DEF_TUNE (X86_TUNE_AVX256_SPLIT_REGS, "avx256_split_regs", m_BDVER | m_BTVER2
	  | m_ZNVER1)

/* X86_TUNE_AVX128_OPTIMAL: Enable 128-bit AVX instruction generation for
   the auto-vectorizer.  */
DEF_TUNE (X86_TUNE_AVX128_OPTIMAL, "avx128_optimal", m_BDVER | m_BTVER2
	  | m_ZNVER1)

/* X86_TUNE_AVX256_OPTIMAL: Use 256-bit AVX instructions instead of 512-bit AVX
   instructions in the auto-vectorizer.  */
DEF_TUNE (X86_TUNE_AVX256_OPTIMAL, "avx256_optimal", m_CORE_AVX512)

/*****************************************************************************/
/* Historical relics: tuning flags that help specific old CPU designs */
/*****************************************************************************/

/* X86_TUNE_DOUBLE_WITH_ADD: Use add instead of sal to double value in
   an integer register.  */
DEF_TUNE (X86_TUNE_DOUBLE_WITH_ADD, "double_with_add", ~m_386)
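/* Illustrative sketch, not a tuning entry: with double_with_add a doubling
   is emitted as

       addl	%eax, %eax	# x *= 2 via addition

   instead of "sall $1, %eax"; this tuning prefers the add form everywhere
   except the original 386.  */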
/* X86_TUNE_ALWAYS_FANCY_MATH_387: controls use of fancy 387 operations,
   such as fsqrt, fprem, fsin, fcos, fsincos etc.
   Should be enabled for all targets that always have a coprocessor.  */
DEF_TUNE (X86_TUNE_ALWAYS_FANCY_MATH_387, "always_fancy_math_387",
	  ~(m_386 | m_486 | m_LAKEMONT))

/* X86_TUNE_UNROLL_STRLEN: Produce (quite lame) unrolled sequence for
   inline strlen.  This affects only -minline-all-stringops mode.  By
   default we always dispatch to a library since our internal strlen
   is bad.  */
DEF_TUNE (X86_TUNE_UNROLL_STRLEN, "unroll_strlen", ~m_386)

/* X86_TUNE_SHIFT1: Enables use of short encoding of "sal reg" instead of
   longer "sal $1, reg".  */
DEF_TUNE (X86_TUNE_SHIFT1, "shift1", ~m_486)

/* X86_TUNE_ZERO_EXTEND_WITH_AND: Use AND instruction instead
   of movzbl/movzwl.  */
DEF_TUNE (X86_TUNE_ZERO_EXTEND_WITH_AND, "zero_extend_with_and",
	  m_486 | m_PENT)

/* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for HImode
   and SImode multiply, but the 386 and 486 do HImode multiply faster.  */
DEF_TUNE (X86_TUNE_PROMOTE_HIMODE_IMUL, "promote_himode_imul",
	  ~(m_386 | m_486))

/* X86_TUNE_FAST_PREFIX: Enable demoting some 32bit or 64bit arithmetic
   into 16bit/8bit when the resulting sequence is shorter.  For example,
   turn "and $-65536, reg" into a 16bit store of 0.  */
DEF_TUNE (X86_TUNE_FAST_PREFIX, "fast_prefix",
	  ~(m_386 | m_486 | m_PENT | m_LAKEMONT))

/* X86_TUNE_READ_MODIFY_WRITE: Enable use of read-modify-write instructions
   such as "add $1, mem".  */
DEF_TUNE (X86_TUNE_READ_MODIFY_WRITE, "read_modify_write",
	  ~(m_PENT | m_LAKEMONT))

/* X86_TUNE_MOVE_M1_VIA_OR: On Pentiums, it is faster to load -1 via OR
   than a MOV.  */
DEF_TUNE (X86_TUNE_MOVE_M1_VIA_OR, "move_m1_via_or", m_PENT | m_LAKEMONT)

/* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
   but one byte longer.  */
DEF_TUNE (X86_TUNE_NOT_UNPAIRABLE, "not_unpairable", m_PENT | m_LAKEMONT)

/* X86_TUNE_PARTIAL_REG_STALL: Pentium Pro, unlike later chips, handled
   use of partial registers by renaming.  This improved performance of 16bit
   code where upper halves of registers are not used.  It also leads to
   a penalty whenever a 16bit store is followed by 32bit use.  This flag
   disables production of such sequences in common cases.
   See also X86_TUNE_HIMODE_MATH.

   In the current implementation the partial register stalls are not
   eliminated very well - they can be introduced via subregs synthesized
   by combine and can happen in caller/callee saving sequences.  */
DEF_TUNE (X86_TUNE_PARTIAL_REG_STALL, "partial_reg_stall", m_PPRO)

/* X86_TUNE_PROMOTE_QIMODE: When it is cheap, turn 8bit arithmetic to
   corresponding 32bit arithmetic.  */
DEF_TUNE (X86_TUNE_PROMOTE_QIMODE, "promote_qimode",
	  ~m_PPRO)

/* X86_TUNE_PROMOTE_HI_REGS: Same, but for 16bit arithmetic.  Again we avoid
   partial register stalls on PentiumPro targets.  */
DEF_TUNE (X86_TUNE_PROMOTE_HI_REGS, "promote_hi_regs", m_PPRO)

/* X86_TUNE_HIMODE_MATH: Enable use of 16bit arithmetic.
   On PPro this flag is meant to avoid partial register stalls.  */
DEF_TUNE (X86_TUNE_HIMODE_MATH, "himode_math", ~m_PPRO)

/* X86_TUNE_SPLIT_LONG_MOVES: Avoid instructions moving immediates
   directly to memory.  */
DEF_TUNE (X86_TUNE_SPLIT_LONG_MOVES, "split_long_moves", m_PPRO)

/* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */
DEF_TUNE (X86_TUNE_USE_XCHGB, "use_xchgb", m_PENT4)
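/* Illustrative sketch, not a tuning entry: the stall that
   X86_TUNE_PARTIAL_REG_STALL avoids looks like

       movw	%dx, %ax	# writes only the low 16 bits of %eax
       ...
       movl	%eax, (%ecx)	# 32bit read of %eax must merge both halves,
				# a multi-cycle stall on Pentium Pro

   so on PPro GCC prefers to do the arithmetic in full 32bit registers
   (see also X86_TUNE_PROMOTE_QIMODE and X86_TUNE_HIMODE_MATH).  */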
/* X86_TUNE_USE_MOV0: Use "mov $0, reg" instead of "xor reg, reg" to clear
   integer register.  */
DEF_TUNE (X86_TUNE_USE_MOV0, "use_mov0", m_K6)

/* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
   operand that cannot be represented using a modRM byte.  The XOR
   replacement is long decoded, so this split helps here as well.  */
DEF_TUNE (X86_TUNE_NOT_VECTORMODE, "not_vectormode", m_K6)

/* X86_TUNE_AVOID_VECTOR_DECODE: Enable splitters that avoid vector decoded
   forms of instructions on K8 targets.  */
DEF_TUNE (X86_TUNE_AVOID_VECTOR_DECODE, "avoid_vector_decode",
	  m_K8)

/*****************************************************************************/
/* This never worked well before. */
/*****************************************************************************/

/* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
   on simulation results.  But after P4 was made, no performance benefit
   was observed with branch hints.  It also increases the code size.
   As a result, icc never generates branch hints.  */
DEF_TUNE (X86_TUNE_BRANCH_PREDICTION_HINTS, "branch_prediction_hints", m_NONE)

/* X86_TUNE_QIMODE_MATH: Enable use of 8bit arithmetic.  */
DEF_TUNE (X86_TUNE_QIMODE_MATH, "qimode_math", m_ALL)

/* X86_TUNE_PROMOTE_QI_REGS: This enables generic code that promotes all 8bit
   arithmetic to 32bit via the PROMOTE_MODE macro.  This code generation
   scheme is usually used for RISC targets.  */
DEF_TUNE (X86_TUNE_PROMOTE_QI_REGS, "promote_qi_regs", m_NONE)

/* X86_TUNE_EMIT_VZEROUPPER: This enables vzeroupper instruction insertion
   before a transfer of control flow out of the function.  */
DEF_TUNE (X86_TUNE_EMIT_VZEROUPPER, "emit_vzeroupper", ~m_KNL)
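/* Illustrative sketch, not a tuning entry: with emit_vzeroupper a function
   that used 256bit registers ends roughly as

       vaddps	%ymm1, %ymm0, %ymm0
       ...
       vzeroupper		# clear upper halves of the ymm registers
       ret

   which avoids the AVX/SSE transition penalty when the caller goes on to
   execute legacy SSE code.  */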