//===-- X86.h - Top-level interface for X86 representation ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the entry points for global functions defined in the x86
// target library, as used by the LLVM JIT.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86_H
#define LLVM_LIB_TARGET_X86_X86_H

#include "llvm/Support/CodeGen.h"

namespace llvm {

class FunctionPass;
class InstructionSelector;
class PassRegistry;
class X86RegisterBankInfo;
class X86Subtarget;
class X86TargetMachine;

/// This pass converts a legalized DAG into an X86-specific DAG, ready for
/// instruction scheduling.
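///
/// A minimal usage sketch (roughly what X86PassConfig::addInstSelector does;
/// shown here only for illustration):
///   addPass(createX86ISelDag(getX86TargetMachine(), getOptLevel()));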
FunctionPass *createX86ISelDag(X86TargetMachine &TM,
                               CodeGenOpt::Level OptLevel);

/// This pass initializes a global base register for PIC on x86-32.
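///
/// Illustrative sketch of the common x86-32 PIC idiom this enables (assumed
/// example; the exact sequence depends on subtarget and relocation model):
///   calll .L0$pb
/// .L0$pb:
///   popl  %ebx            # %ebx now holds the PIC base address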
FunctionPass *createX86GlobalBaseRegPass();

/// This pass combines multiple accesses to local-dynamic TLS variables so that
/// the TLS base address for the module is only fetched once per execution path
/// through the function.
FunctionPass *createCleanupLocalDynamicTLSPass();

/// This function returns a pass which converts floating-point register
/// references and pseudo instructions into floating-point stack references and
/// physical instructions.
FunctionPass *createX86FloatingPointStackifierPass();

/// This pass inserts AVX vzeroupper instructions before each call to avoid the
/// transition penalty between functions encoded with AVX and SSE.
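///
/// Illustrative placement (assumed example):
///   vaddps %ymm1, %ymm2, %ymm0
///   vzeroupper                    # inserted before the call
///   callq  uses_legacy_sse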
FunctionPass *createX86IssueVZeroUpperPass();

/// This pass inserts ENDBR instructions before indirect jump/call
/// destinations as part of the CET IBT mechanism.
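///
/// Illustrative example: each valid indirect branch target begins with an
/// ENDBR instruction (endbr64 in 64-bit mode, endbr32 in 32-bit mode):
/// callee:
///   endbr64
///   ...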
FunctionPass *createX86IndirectBranchTrackingPass();

/// This pass inserts KCFI checks before indirect calls.
FunctionPass *createX86KCFIPass();

/// Return a pass that pads short functions with NOOPs.
/// This will prevent a stall when returning on the Atom.
FunctionPass *createX86PadShortFunctions();

/// Return a pass that selectively replaces certain instructions (like add,
/// sub, inc, dec, some shifts, and some multiplies) by equivalent LEA
/// instructions, in order to eliminate execution delays in some processors.
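///
/// Illustrative rewrites (assumed examples; only legal when EFLAGS are not
/// live afterwards, since LEA does not set flags):
///   incl %ecx          ->  leal 1(%ecx), %ecx
///   addl %esi, %edi    ->  leal (%edi,%esi), %edi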
FunctionPass *createX86FixupLEAs();

/// Return a pass that removes redundant LEA instructions and redundant address
/// recalculations.
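///
/// Illustrative example (assumed): a memory operand that recomputes an address
/// already produced by an LEA is rewritten relative to that LEA:
///   leaq 16(%rdi,%rsi), %rax
///   movl 4(%rdi,%rsi), %ecx      ->  movl -12(%rax), %ecx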
FunctionPass *createX86OptimizeLEAs();

/// Return a pass that transforms setcc + movzx pairs into xor + setcc.
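///
/// Illustrative example:
///   cmpl   %esi, %edi
///   setl   %al
///   movzbl %al, %eax
/// becomes
///   xorl   %eax, %eax    # zeroing moved ahead of the compare because
///   cmpl   %esi, %edi    # xor clobbers EFLAGS
///   setl   %al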
FunctionPass *createX86FixupSetCC();

/// Return a pass that avoids creating store-forwarding block issues in the
/// hardware.
FunctionPass *createX86AvoidStoreForwardingBlocks();

/// Return a pass that lowers EFLAGS copy pseudo instructions.
FunctionPass *createX86FlagsCopyLoweringPass();

/// Return a pass that expands DynAlloca pseudo-instructions.
FunctionPass *createX86DynAllocaExpander();

/// Return a pass that configures the tile registers.
FunctionPass *createX86TileConfigPass();

/// Return a pass that preconfigures the tile registers before fast register
/// allocation.
FunctionPass *createX86FastPreTileConfigPass();

/// Return a pass that configures the tile registers after fast register
/// allocation.
FunctionPass *createX86FastTileConfigPass();

/// Return a pass that inserts the pseudo tile config instruction.
FunctionPass *createX86PreTileConfigPass();

/// Return a pass that lowers the tile copy instruction.
FunctionPass *createX86LowerTileCopyPass();

/// Return a pass that inserts int3 at the end of the function if it ends with
/// a CALL instruction. The pass does the same for each funclet as well. This
/// ensures that the open interval of function start and end PCs contains all
/// return addresses for the benefit of the Windows x64 unwinder.
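///
/// Illustrative example (assumed):
///   callq abort
///   int3                # inserted so the call's return address still lies
///                       # within this function's PC range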
FunctionPass *createX86AvoidTrailingCallPass();

/// Return a pass that optimizes the code-size of x86 call sequences. This is
/// done by replacing esp-relative movs with pushes.
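///
/// Illustrative example (32-bit call sequence, assumed):
///   subl  $8, %esp
///   movl  $42, 4(%esp)
///   movl  %eax, (%esp)
///   calll foo
/// can instead be emitted as
///   pushl $42
///   pushl %eax
///   calll foo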
FunctionPass *createX86CallFrameOptimization();

/// Return an IR pass that inserts EH registration stack objects and explicit
/// EH state updates. This pass must run after EH preparation, which does
/// Windows-specific but architecture-neutral preparation.
FunctionPass *createX86WinEHStatePass();

/// Return a Machine IR pass that expands X86-specific pseudo
/// instructions into a sequence of actual instructions. This pass
/// must run after prologue/epilogue insertion and before lowering
/// the MachineInstr to MC.
FunctionPass *createX86ExpandPseudoPass();

/// This pass converts X86 cmov instructions into branches when profitable.
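///
/// Illustrative example:
///   cmovll %esi, %eax
/// may be rewritten as
///   jge  .LBB0_2
///   movl %esi, %eax
/// .LBB0_2: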
FunctionPass *createX86CmovConverterPass();

/// Return a Machine IR pass that selectively replaces
/// certain byte and word instructions with equivalent 32-bit instructions,
/// in order to eliminate partial register usage, false dependences on
/// the upper portions of registers, and to save code size.
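///
/// Illustrative example (when the upper bits of the destination are dead):
///   movb (%rcx), %al    ->  movzbl (%rcx), %eax
///   movw (%rcx), %ax    ->  movzwl (%rcx), %eax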
FunctionPass *createX86FixupBWInsts();

/// Return a Machine IR pass that reassigns instruction chains from one domain
/// to another, when profitable.
FunctionPass *createX86DomainReassignmentPass();

/// This pass replaces the EVEX encoding of AVX-512 instructions with VEX
/// encoding when possible in order to reduce code size.
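///
/// Illustrative example (assumed): an AVX-512VL instruction that uses only
/// xmm0-xmm15/ymm0-ymm15 and no masking, broadcast, or rounding control can
/// trade the 4-byte EVEX prefix for a 2- or 3-byte VEX prefix:
///   vmovaps %xmm1, %xmm2   (EVEX)  ->  vmovaps %xmm1, %xmm2   (VEX)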
FunctionPass *createX86EvexToVexInsts();

/// This pass creates the thunks for the retpoline feature.
FunctionPass *createX86IndirectThunksPass();

/// This pass replaces ret instructions with jmps to the __x86_return_thunk.
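///
/// Illustrative example:
///   retq   ->   jmp __x86_return_thunk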
FunctionPass *createX86ReturnThunksPass();

/// This pass ensures that instructions featuring a memory operand have a
/// distinct <LineNumber, Discriminator> pair (with respect to each other).
FunctionPass *createX86DiscriminateMemOpsPass();

/// This pass applies profiling information to insert cache prefetches.
FunctionPass *createX86InsertPrefetchPass();

/// This pass inserts a wait instruction after X87 instructions that could
/// raise FP exceptions when strict FP is enabled.
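///
/// Illustrative example (assumed, strict FP):
///   fadds (%rsi)
///   wait              # inserted so a pending x87 exception is raised here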
FunctionPass *createX86InsertX87waitPass();

/// This pass optimizes arithmetic based on knowledge that is only used by
/// a reduction sequence and is therefore safe to reassociate in interesting
/// ways.
FunctionPass *createX86PartialReductionPass();

InstructionSelector *createX86InstructionSelector(const X86TargetMachine &TM,
                                                  X86Subtarget &,
                                                  X86RegisterBankInfo &);

FunctionPass *createX86LoadValueInjectionLoadHardeningPass();
FunctionPass *createX86LoadValueInjectionRetHardeningPass();
FunctionPass *createX86SpeculativeLoadHardeningPass();
FunctionPass *createX86SpeculativeExecutionSideEffectSuppression();

void initializeEvexToVexInstPassPass(PassRegistry &);
void initializeFPSPass(PassRegistry &);
void initializeFixupBWInstPassPass(PassRegistry &);
void initializeFixupLEAPassPass(PassRegistry &);
void initializeWinEHStatePassPass(PassRegistry &);
void initializeX86AvoidSFBPassPass(PassRegistry &);
void initializeX86AvoidTrailingCallPassPass(PassRegistry &);
void initializeX86CallFrameOptimizationPass(PassRegistry &);
void initializeX86CmovConverterPassPass(PassRegistry &);
void initializeX86DAGToDAGISelPass(PassRegistry &);
void initializeX86DomainReassignmentPass(PassRegistry &);
void initializeX86ExecutionDomainFixPass(PassRegistry &);
void initializeX86ExpandPseudoPass(PassRegistry &);
void initializeX86FastPreTileConfigPass(PassRegistry &);
void initializeX86FastTileConfigPass(PassRegistry &);
void initializeX86FixupSetCCPassPass(PassRegistry &);
void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);
void initializeX86KCFIPass(PassRegistry &);
void initializeX86LoadValueInjectionLoadHardeningPassPass(PassRegistry &);
void initializeX86LoadValueInjectionRetHardeningPassPass(PassRegistry &);
void initializeX86LowerAMXIntrinsicsLegacyPassPass(PassRegistry &);
void initializeX86LowerAMXTypeLegacyPassPass(PassRegistry &);
void initializeX86LowerTileCopyPass(PassRegistry &);
void initializeX86OptimizeLEAPassPass(PassRegistry &);
void initializeX86PartialReductionPass(PassRegistry &);
void initializeX86PreAMXConfigPassPass(PassRegistry &);
void initializeX86PreTileConfigPass(PassRegistry &);
void initializeX86ReturnThunksPass(PassRegistry &);
void initializeX86SpeculativeExecutionSideEffectSuppressionPass(PassRegistry &);
void initializeX86SpeculativeLoadHardeningPassPass(PassRegistry &);
void initializeX86TileConfigPass(PassRegistry &);

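/// Address spaces with special meaning to the X86 backend: 256, 257, and 258
/// are the GS-, FS-, and SS-relative segments, while 270, 271, and 272 are
/// used for explicitly sized (32-bit sign-/zero-extended and 64-bit) pointers.
/// Illustrative IR usage (assumed example):
///   %v = load i32, ptr addrspace(257) %p   ; FS-relative load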
namespace X86AS {
enum : unsigned {
  GS = 256,
  FS = 257,
  SS = 258,
  PTR32_SPTR = 270,
  PTR32_UPTR = 271,
  PTR64 = 272
};
} // End X86AS namespace

} // End llvm namespace

#endif