1 //===-- X86.h - Top-level interface for X86 representation ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the entry points for global functions defined in the x86
10 // target library, as used by the LLVM JIT.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_LIB_TARGET_X86_X86_H
15 #define LLVM_LIB_TARGET_X86_X86_H
16 
17 #include "llvm/Support/CodeGen.h"
18 
19 namespace llvm {
20 
21 class FunctionPass;
22 class InstructionSelector;
23 class PassRegistry;
24 class X86RegisterBankInfo;
25 class X86Subtarget;
26 class X86TargetMachine;
27 
/// This pass converts a legalized DAG into an X86-specific DAG, ready for
/// instruction scheduling.
FunctionPass *createX86ISelDag(X86TargetMachine &TM,
                               CodeGenOpt::Level OptLevel);

/// This pass initializes a global base register for PIC on x86-32.
FunctionPass *createX86GlobalBaseRegPass();

/// This pass combines multiple accesses to local-dynamic TLS variables so that
/// the TLS base address for the module is only fetched once per execution path
/// through the function.
FunctionPass *createCleanupLocalDynamicTLSPass();

/// This function returns a pass which converts floating-point register
/// references and pseudo instructions into floating-point stack references and
/// physical instructions.
FunctionPass *createX86FloatingPointStackifierPass();

/// This pass inserts AVX vzeroupper instructions before each call to avoid
/// transition penalty between functions encoded with AVX and SSE.
FunctionPass *createX86IssueVZeroUpperPass();

/// This pass inserts ENDBR instructions before indirect jump/call
/// destinations as part of CET IBT mechanism.
FunctionPass *createX86IndirectBranchTrackingPass();

/// Return a pass that pads short functions with NOOPs.
/// This will prevent a stall when returning on the Atom.
FunctionPass *createX86PadShortFunctions();

/// Return a pass that selectively replaces certain instructions (like add,
/// sub, inc, dec, some shifts, and some multiplies) by equivalent LEA
/// instructions, in order to eliminate execution delays in some processors.
FunctionPass *createX86FixupLEAs();

/// Return a pass that removes redundant LEA instructions and redundant address
/// recalculations.
FunctionPass *createX86OptimizeLEAs();

/// Return a pass that transforms setcc + movzx pairs into xor + setcc.
FunctionPass *createX86FixupSetCC();

/// Return a pass that avoids creating store-forwarding block issues in the
/// hardware.
FunctionPass *createX86AvoidStoreForwardingBlocks();

/// Return a pass that lowers EFLAGS copy pseudo instructions.
FunctionPass *createX86FlagsCopyLoweringPass();

/// Return a pass that expands WinAlloca pseudo-instructions.
FunctionPass *createX86WinAllocaExpander();
78 
/// Return a pass that configures the AMX tile registers.
/// NOTE(review): summary inferred from the pass name — see the pass
/// implementation for the authoritative description.
FunctionPass *createX86TileConfigPass();

/// Return a pass that inserts the AMX tile pre-configuration.
/// NOTE(review): summary inferred from the pass name — see the pass
/// implementation for the authoritative description.
FunctionPass *createX86PreTileConfigPass();
82 
/// Return a pass that inserts int3 at the end of the function if it ends with a
/// CALL instruction. The pass does the same for each funclet as well. This
/// ensures that the open interval of function start and end PCs contains all
/// return addresses for the benefit of the Windows x64 unwinder.
FunctionPass *createX86AvoidTrailingCallPass();

/// Return a pass that optimizes the code-size of x86 call sequences. This is
/// done by replacing esp-relative movs with pushes.
FunctionPass *createX86CallFrameOptimization();

/// Return an IR pass that inserts EH registration stack objects and explicit
/// EH state updates. This pass must run after EH preparation, which does
/// Windows-specific but architecture-neutral preparation.
FunctionPass *createX86WinEHStatePass();

/// Return a Machine IR pass that expands X86-specific pseudo
/// instructions into a sequence of actual instructions. This pass
/// must run after prologue/epilogue insertion and before lowering
/// the MachineInstr to MC.
FunctionPass *createX86ExpandPseudoPass();

/// This pass converts X86 cmov instructions into branches when profitable.
FunctionPass *createX86CmovConverterPass();

/// Return a Machine IR pass that selectively replaces
/// certain byte and word instructions by equivalent 32-bit instructions,
/// in order to eliminate partial register usage, false dependences on
/// the upper portions of registers, and to save code size.
FunctionPass *createX86FixupBWInsts();

/// Return a Machine IR pass that reassigns instruction chains from one domain
/// to another, when profitable.
FunctionPass *createX86DomainReassignmentPass();
116 
/// This pass replaces EVEX-encoded AVX-512 instructions with an equivalent VEX
/// encoding when possible in order to reduce code size.
FunctionPass *createX86EvexToVexInsts();

/// This pass creates the thunks for the retpoline feature.
FunctionPass *createX86IndirectThunksPass();

/// This pass ensures instructions featuring a memory operand
/// have distinctive <LineNumber, Discriminator> (with respect to each other).
FunctionPass *createX86DiscriminateMemOpsPass();

/// This pass applies profiling information to insert cache prefetches.
FunctionPass *createX86InsertPrefetchPass();

/// This pass inserts a wait instruction after X87 instructions which could
/// raise fp exceptions when strict-fp is enabled.
FunctionPass *createX86InsertX87waitPass();

/// This pass optimizes arithmetic based on knowledge that is only used by
/// a reduction sequence and is therefore safe to reassociate in interesting
/// ways.
FunctionPass *createX86PartialReductionPass();
139 
/// Create a GlobalISel instruction selector for the X86 target, specialized
/// for the given subtarget and register-bank info.
InstructionSelector *createX86InstructionSelector(const X86TargetMachine &TM,
                                                  X86Subtarget &,
                                                  X86RegisterBankInfo &);

/// Return a pass that hardens loads against Load Value Injection (LVI).
FunctionPass *createX86LoadValueInjectionLoadHardeningPass();
/// Return a pass that hardens returns against Load Value Injection (LVI).
FunctionPass *createX86LoadValueInjectionRetHardeningPass();
/// Return a pass that performs speculative load hardening to mitigate
/// speculative-execution (Spectre v1 style) attacks.
FunctionPass *createX86SpeculativeLoadHardeningPass();
/// Return a pass that suppresses the side effects of speculatively executed
/// instructions.
FunctionPass *createX86SpeculativeExecutionSideEffectSuppression();
148 
// Pass registration: each initialize*Pass function registers the
// corresponding X86 pass with the given PassRegistry so the pass manager
// infrastructure can refer to it.
void initializeEvexToVexInstPassPass(PassRegistry &);
void initializeFixupBWInstPassPass(PassRegistry &);
void initializeFixupLEAPassPass(PassRegistry &);
void initializeFPSPass(PassRegistry &);
void initializeWinEHStatePassPass(PassRegistry &);
void initializeX86AvoidSFBPassPass(PassRegistry &);
void initializeX86AvoidTrailingCallPassPass(PassRegistry &);
void initializeX86CallFrameOptimizationPass(PassRegistry &);
void initializeX86CmovConverterPassPass(PassRegistry &);
void initializeX86DomainReassignmentPass(PassRegistry &);
void initializeX86ExecutionDomainFixPass(PassRegistry &);
void initializeX86ExpandPseudoPass(PassRegistry &);
void initializeX86FixupSetCCPassPass(PassRegistry &);
void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);
void initializeX86LoadValueInjectionLoadHardeningPassPass(PassRegistry &);
void initializeX86LoadValueInjectionRetHardeningPassPass(PassRegistry &);
void initializeX86OptimizeLEAPassPass(PassRegistry &);
void initializeX86PartialReductionPass(PassRegistry &);
void initializeX86SpeculativeLoadHardeningPassPass(PassRegistry &);
void initializeX86SpeculativeExecutionSideEffectSuppressionPass(PassRegistry &);
void initializeX86PreTileConfigPass(PassRegistry &);
void initializeX86TileConfigPass(PassRegistry &);
void initializeX86LowerAMXTypeLegacyPassPass(PassRegistry &);
172 
/// X86-specific LLVM address-space numbers. The values start above the
/// target-independent range so they cannot collide with generic address
/// spaces.
namespace X86AS {
enum : unsigned {
  GS = 256,         // gs-segment-relative addressing
  FS = 257,         // fs-segment-relative addressing
  SS = 258,         // ss-segment-relative addressing
  PTR32_SPTR = 270, // 32-bit sign-extended pointer (MSVC __ptr32 __sptr)
  PTR32_UPTR = 271, // 32-bit zero-extended pointer (MSVC __ptr32 __uptr)
  PTR64 = 272       // explicit 64-bit pointer (MSVC __ptr64)
};
} // End X86AS namespace
183 
184 } // End llvm namespace
185 
186 #endif
187