//===-- X86.h - Top-level interface for X86 representation ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the entry points for global functions defined in the x86
// target library, as used by the LLVM JIT.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86_H
#define LLVM_LIB_TARGET_X86_X86_H

#include "llvm/Support/CodeGen.h"

namespace llvm {

class FunctionPass;
class InstructionSelector;
class PassRegistry;
class X86RegisterBankInfo;
class X86Subtarget;
class X86TargetMachine;

/// This pass converts a legalized DAG into an X86-specific DAG, ready for
/// instruction scheduling.
FunctionPass *createX86ISelDag(X86TargetMachine &TM,
                               CodeGenOpt::Level OptLevel);

/// This pass initializes a global base register for PIC on x86-32.
FunctionPass *createX86GlobalBaseRegPass();

/// This pass combines multiple accesses to local-dynamic TLS variables so that
/// the TLS base address for the module is only fetched once per execution path
/// through the function.
FunctionPass *createCleanupLocalDynamicTLSPass();

/// This function returns a pass which converts floating-point register
/// references and pseudo instructions into floating-point stack references and
/// physical instructions.
FunctionPass *createX86FloatingPointStackifierPass();

/// This pass inserts AVX vzeroupper instructions before each call to avoid
/// the transition penalty between functions encoded with AVX and SSE.
FunctionPass *createX86IssueVZeroUpperPass();

/// This pass inserts ENDBR instructions before indirect jump/call
/// destinations as part of the CET IBT mechanism.
FunctionPass *createX86IndirectBranchTrackingPass();

/// Return a pass that pads short functions with NOOPs.
/// This will prevent a stall when returning on the Atom.
FunctionPass *createX86PadShortFunctions();

/// Return a pass that selectively replaces certain instructions (like add,
/// sub, inc, dec, some shifts, and some multiplies) by equivalent LEA
/// instructions, in order to eliminate execution delays in some processors.
FunctionPass *createX86FixupLEAs();

/// Return a pass that replaces equivalent slower instructions with faster
/// ones.
FunctionPass *createX86FixupInstTuning();

/// Return a pass that reduces the size of vector constant pool loads.
FunctionPass *createX86FixupVectorConstants();

/// Return a pass that removes redundant LEA instructions and redundant address
/// recalculations.
FunctionPass *createX86OptimizeLEAs();

/// Return a pass that transforms setcc + movzx pairs into xor + setcc.
FunctionPass *createX86FixupSetCC();
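//
// A rough sketch of the intended rewrite (register choice is illustrative):
//
//   setcc %al                      xorl  %eax, %eax
//   movzbl %al, %eax      ==>      setcc %al
//
// Clearing the destination with xor before EFLAGS are written makes the zero
// extension unnecessary and avoids a false dependence on the previous value
// of %eax.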

/// Return a pass that avoids creating store-forwarding block issues in the
/// hardware.
FunctionPass *createX86AvoidStoreForwardingBlocks();

/// Return a pass that lowers EFLAGS copy pseudo instructions.
FunctionPass *createX86FlagsCopyLoweringPass();

/// Return a pass that expands DynAlloca pseudo-instructions.
FunctionPass *createX86DynAllocaExpander();

/// Return a pass that configures the tile registers.
FunctionPass *createX86TileConfigPass();

/// Return a pass that preconfigures the tile registers before fast register
/// allocation.
FunctionPass *createX86FastPreTileConfigPass();

/// Return a pass that configures the tile registers after fast register
/// allocation.
FunctionPass *createX86FastTileConfigPass();

/// Return a pass that inserts the pseudo tile config instruction.
FunctionPass *createX86PreTileConfigPass();

/// Return a pass that lowers the tile copy instruction.
FunctionPass *createX86LowerTileCopyPass();

/// Return a pass that inserts int3 at the end of the function if it ends with a
/// CALL instruction. The pass does the same for each funclet as well. This
/// ensures that the open interval of function start and end PCs contains all
/// return addresses for the benefit of the Windows x64 unwinder.
FunctionPass *createX86AvoidTrailingCallPass();
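//
// A minimal sketch of the effect (assembly and names are illustrative):
//
//   foo:                              foo:
//     ...                               ...
//     callq abort_handler               callq abort_handler
//   <end of foo>                        int3
//                                     <end of foo>
//
// The return address pushed by a trailing call is the end-of-function PC;
// padding with int3 keeps that address strictly inside foo so the Windows
// unwinder attributes it to the right function.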

/// Return a pass that optimizes the code size of x86 call sequences. This is
/// done by replacing esp-relative movs with pushes.
FunctionPass *createX86CallFrameOptimization();
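//
// Roughly, for arguments passed on the stack (values and callee illustrative):
//
//   subl $8, %esp
//   movl $42, (%esp)                 pushl $17
//   movl $17, 4(%esp)       ==>      pushl $42
//   calll callee                     calll callee
//
// The push encodings are shorter than the esp-relative movs plus the explicit
// stack adjustment, which is what saves code size.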

/// Return an IR pass that inserts EH registration stack objects and explicit
/// EH state updates. This pass must run after EH preparation, which does
/// Windows-specific but architecture-neutral preparation.
FunctionPass *createX86WinEHStatePass();

/// Return a Machine IR pass that expands X86-specific pseudo
/// instructions into a sequence of actual instructions. This pass
/// must run after prologue/epilogue insertion and before lowering
/// the MachineInstr to MC.
FunctionPass *createX86ExpandPseudoPass();

/// This pass converts X86 cmov instructions into branches when profitable.
FunctionPass *createX86CmovConverterPass();

/// Return a Machine IR pass that selectively replaces
/// certain byte and word instructions with equivalent 32-bit instructions,
/// in order to eliminate partial register usage, false dependences on
/// the upper portions of registers, and to save code size.
FunctionPass *createX86FixupBWInsts();
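//
// A rough sketch of the kind of rewrite involved (registers illustrative, and
// only valid when the wider destination bits are known to be dead):
//
//   movb (%rsi), %al       ==>      movzbl (%rsi), %eax
//
// The byte load merges into %rax and so carries a false dependence on its
// previous value; the 32-bit zero-extending load defines the whole register
// and breaks that dependence.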

/// Return a Machine IR pass that reassigns instruction chains from one domain
/// to another, when profitable.
FunctionPass *createX86DomainReassignmentPass();

/// This pass replaces the EVEX encoding of AVX-512 instructions with the VEX
/// encoding when possible in order to reduce code size.
FunctionPass *createX86EvexToVexInsts();

/// This pass creates the thunks for the retpoline feature.
FunctionPass *createX86IndirectThunksPass();

/// This pass replaces ret instructions with jmps to the __x86_return thunk.
FunctionPass *createX86ReturnThunksPass();

/// This pass ensures instructions featuring a memory operand have distinctive
/// <LineNumber, Discriminator> pairs (with respect to each other).
FunctionPass *createX86DiscriminateMemOpsPass();

/// This pass applies profiling information to insert cache prefetches.
FunctionPass *createX86InsertPrefetchPass();

/// This pass inserts a wait instruction after X87 instructions that could
/// raise floating-point exceptions when strict floating point is enabled.
FunctionPass *createX86InsertX87waitPass();
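//
// Illustrative shape of the insertion (the exact instruction list is owned by
// the pass):
//
//   fstps (%rdi)
//   wait                  ; deliver any pending unmasked x87 exception here
//
// The wait gives the deferred x87 exception a well-defined reporting point,
// as required for strict floating-point semantics.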

/// This pass optimizes arithmetic based on knowledge that is only used by
/// a reduction sequence and is therefore safe to reassociate in interesting
/// ways.
FunctionPass *createX86PartialReductionPass();

InstructionSelector *createX86InstructionSelector(const X86TargetMachine &TM,
                                                  X86Subtarget &,
                                                  X86RegisterBankInfo &);

FunctionPass *createX86LoadValueInjectionLoadHardeningPass();
FunctionPass *createX86LoadValueInjectionRetHardeningPass();
FunctionPass *createX86SpeculativeLoadHardeningPass();
FunctionPass *createX86SpeculativeExecutionSideEffectSuppression();
FunctionPass *createX86ArgumentStackSlotPass();

void initializeEvexToVexInstPassPass(PassRegistry &);
void initializeFPSPass(PassRegistry &);
void initializeFixupBWInstPassPass(PassRegistry &);
void initializeFixupLEAPassPass(PassRegistry &);
void initializeX86ArgumentStackSlotPassPass(PassRegistry &);
void initializeX86FixupInstTuningPassPass(PassRegistry &);
void initializeX86FixupVectorConstantsPassPass(PassRegistry &);
void initializeWinEHStatePassPass(PassRegistry &);
void initializeX86AvoidSFBPassPass(PassRegistry &);
void initializeX86AvoidTrailingCallPassPass(PassRegistry &);
void initializeX86CallFrameOptimizationPass(PassRegistry &);
void initializeX86CmovConverterPassPass(PassRegistry &);
void initializeX86DAGToDAGISelPass(PassRegistry &);
void initializeX86DomainReassignmentPass(PassRegistry &);
void initializeX86ExecutionDomainFixPass(PassRegistry &);
void initializeX86ExpandPseudoPass(PassRegistry &);
void initializeX86FastPreTileConfigPass(PassRegistry &);
void initializeX86FastTileConfigPass(PassRegistry &);
void initializeX86FixupSetCCPassPass(PassRegistry &);
void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);
void initializeX86LoadValueInjectionLoadHardeningPassPass(PassRegistry &);
void initializeX86LoadValueInjectionRetHardeningPassPass(PassRegistry &);
void initializeX86LowerAMXIntrinsicsLegacyPassPass(PassRegistry &);
void initializeX86LowerAMXTypeLegacyPassPass(PassRegistry &);
void initializeX86LowerTileCopyPass(PassRegistry &);
void initializeX86OptimizeLEAPassPass(PassRegistry &);
void initializeX86PartialReductionPass(PassRegistry &);
void initializeX86PreAMXConfigPassPass(PassRegistry &);
void initializeX86PreTileConfigPass(PassRegistry &);
void initializeX86ReturnThunksPass(PassRegistry &);
void initializeX86SpeculativeExecutionSideEffectSuppressionPass(PassRegistry &);
void initializeX86SpeculativeLoadHardeningPassPass(PassRegistry &);
void initializeX86TileConfigPass(PassRegistry &);

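// Informal summary (the backend's lowering is authoritative): address spaces
// 256-258 mark GS-, FS- and SS-segment-relative memory operands, while
// 270-272 distinguish sign-extended 32-bit, zero-extended 32-bit, and plain
// 64-bit pointers in mixed-pointer-size code (e.g. the Microsoft
// __ptr32/__sptr/__uptr/__ptr64 qualifiers). For example, a load such as
//
//   %v = load i64, ptr addrspace(257) %p
//
// is expected to lower to an FS-relative memory access on x86-64.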
namespace X86AS {
enum : unsigned {
  GS = 256,
  FS = 257,
  SS = 258,
  PTR32_SPTR = 270,
  PTR32_UPTR = 271,
  PTR64 = 272
};
} // End X86AS namespace

} // End llvm namespace

#endif