//===- ImplicitNullChecks.cpp - Fold null checks into memory accesses ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass turns explicit null checks of the form
//
//   test %r10, %r10
//   je throw_npe
//   movl (%r10), %esi
//   ...
//
// into
//
//   faulting_load_op("movl (%r10), %esi", throw_npe)
//   ...
//
// With the help of a runtime that understands the .fault_maps section,
// faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
// a page fault.
// Store and LoadStore are also supported.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/FaultMaps.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

static cl::opt<int> PageSize("imp-null-check-page-size",
                             cl::desc("The page size of the target in bytes"),
                             cl::init(4096), cl::Hidden);

static cl::opt<unsigned> MaxInstsToConsider(
    "imp-null-max-insts-to-consider",
    cl::desc("The max number of instructions to consider hoisting loads over "
             "(the algorithm is quadratic over this number)"),
    cl::Hidden, cl::init(8));

#define DEBUG_TYPE "implicit-null-checks"

STATISTIC(NumImplicitNullChecks,
          "Number of explicit null checks made implicit");

namespace {

class ImplicitNullChecks : public MachineFunctionPass {
  /// Return true if \c computeDependence can process \p MI.
  static bool canHandle(const MachineInstr *MI);

  /// Helper function for \c computeDependence. Return true if \p A
  /// and \p B do not have any dependences between them, and can be
  /// re-ordered without changing program semantics.
  bool canReorder(const MachineInstr *A, const MachineInstr *B);

  /// A data type for representing the result computed by \c
  /// computeDependence. States whether it is okay to reorder the
  /// instruction passed to \c computeDependence with at most one
  /// dependency.
  struct DependenceResult {
    /// Can we actually re-order \p MI with \p Insts (see \c
    /// computeDependence).
    bool CanReorder;

    /// If non-None, then an instruction in \p Insts that also must be
    /// hoisted.
    Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;

    /*implicit*/ DependenceResult(
        bool CanReorder,
        Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
        : CanReorder(CanReorder), PotentialDependence(PotentialDependence) {
      assert((!PotentialDependence || CanReorder) &&
             "!CanReorder && PotentialDependence.hasValue() not allowed!");
    }
  };

  /// Compute a result for the following question: can \p MI be
  /// re-ordered from after \p Insts to before them.
  ///
  /// \c canHandle should return true for all instructions in \p
  /// Insts.
  DependenceResult computeDependence(const MachineInstr *MI,
                                     ArrayRef<MachineInstr *> Block);

  /// Represents one null check that can be made implicit.
  class NullCheck {
    // The memory operation the null check can be folded into.
    MachineInstr *MemOperation;

    // The instruction actually doing the null check (Ptr != 0).
    MachineInstr *CheckOperation;

    // The block the check resides in.
    MachineBasicBlock *CheckBlock;

    // The block branched to if the pointer is non-null.
    MachineBasicBlock *NotNullSucc;

    // The block branched to if the pointer is null.
    MachineBasicBlock *NullSucc;

    // If this is non-null, then MemOperation has a dependency on this
    // instruction; and it needs to be hoisted to execute before MemOperation.
    MachineInstr *OnlyDependency;

  public:
    explicit NullCheck(MachineInstr *memOperation,
                       MachineInstr *checkOperation,
                       MachineBasicBlock *checkBlock,
                       MachineBasicBlock *notNullSucc,
                       MachineBasicBlock *nullSucc,
                       MachineInstr *onlyDependency)
        : MemOperation(memOperation), CheckOperation(checkOperation),
          CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc),
          OnlyDependency(onlyDependency) {}

    MachineInstr *getMemOperation() const { return MemOperation; }

    MachineInstr *getCheckOperation() const { return CheckOperation; }

    MachineBasicBlock *getCheckBlock() const { return CheckBlock; }

    MachineBasicBlock *getNotNullSucc() const { return NotNullSucc; }

    MachineBasicBlock *getNullSucc() const { return NullSucc; }

    MachineInstr *getOnlyDependency() const { return OnlyDependency; }
  };

  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  AliasAnalysis *AA = nullptr;
  MachineFrameInfo *MFI = nullptr;

  bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
                                 SmallVectorImpl<NullCheck> &NullCheckList);
  MachineInstr *insertFaultingInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                                    MachineBasicBlock *HandlerMBB);
  void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);

  enum AliasResult {
    AR_NoAlias,
    AR_MayAlias,
    AR_WillAliasEverything
  };

  /// Returns AR_NoAlias if the memory operation in \p MI does not alias with
  /// \p PrevMI, AR_MayAlias if they may alias, and AR_WillAliasEverything if
  /// they may alias and any further memory operation may alias with \p PrevMI.
  AliasResult areMemoryOpsAliased(const MachineInstr &MI,
                                  const MachineInstr *PrevMI) const;

  enum SuitabilityResult {
    SR_Suitable,
    SR_Unsuitable,
    SR_Impossible
  };

  /// Return SR_Suitable if \p MI is a memory operation that can be used to
  /// implicitly null check the value in \p PointerReg, SR_Unsuitable if
  /// \p MI cannot be used to null check, and SR_Impossible if there is no
  /// point in continuing the search because no later instruction will be
  /// usable either. \p PrevInsts is the set of instructions seen since
  /// the explicit null check on \p PointerReg.
  SuitabilityResult isSuitableMemoryOp(const MachineInstr &MI,
                                       unsigned PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts);

  /// Returns true if \p DependenceMI would clobber any of the live-ins of the
  /// \p NullSucc block if it were hoisted to the null check block. This is
  /// used by the caller, \c canHoistInst, to decide whether \p DependenceMI
  /// can be hoisted safely.
  bool canDependenceHoistingClobberLiveIns(MachineInstr *DependenceMI,
                                           MachineBasicBlock *NullSucc);

  /// Return true if \p FaultingMI can be hoisted from after the
  /// instructions in \p InstsSeenSoFar to before them. Set \p Dependence to a
  /// non-null value if we also need to (and legally can) hoist a dependency.
  bool canHoistInst(MachineInstr *FaultingMI,
                    ArrayRef<MachineInstr *> InstsSeenSoFar,
                    MachineBasicBlock *NullSucc, MachineInstr *&Dependence);

public:
  static char ID;

  ImplicitNullChecks() : MachineFunctionPass(ID) {
    initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }
};

} // end anonymous namespace

bool ImplicitNullChecks::canHandle(const MachineInstr *MI) {
  if (MI->isCall() || MI->mayRaiseFPException() ||
      MI->hasUnmodeledSideEffects())
    return false;
  auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); };
  (void)IsRegMask;

  assert(!llvm::any_of(MI->operands(), IsRegMask) &&
         "Calls were filtered out above!");

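  // Volatile and ordered-atomic accesses must not be reordered or speculated,
  // so only instructions whose memory operands are all unordered can be
  // handled.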
  auto IsUnordered = [](MachineMemOperand *MMO) { return MMO->isUnordered(); };
  return llvm::all_of(MI->memoperands(), IsUnordered);
}

ImplicitNullChecks::DependenceResult
ImplicitNullChecks::computeDependence(const MachineInstr *MI,
                                      ArrayRef<MachineInstr *> Block) {
  assert(llvm::all_of(Block, canHandle) && "Check this first!");
  assert(!is_contained(Block, MI) && "Block must be exclusive of MI!");

  Optional<ArrayRef<MachineInstr *>::iterator> Dep;

  for (auto I = Block.begin(), E = Block.end(); I != E; ++I) {
    if (canReorder(*I, MI))
      continue;

    if (Dep == None) {
      // Found one possible dependency, keep track of it.
      Dep = I;
    } else {
      // We found two dependencies, so bail out.
      return {false, None};
    }
  }

  return {true, Dep};
}

bool ImplicitNullChecks::canReorder(const MachineInstr *A,
                                    const MachineInstr *B) {
  assert(canHandle(A) && canHandle(B) && "Precondition!");

  // canHandle makes sure that we _can_ correctly analyze the dependencies
  // between A and B here -- for instance, we should not be dealing with heap
  // load-store dependencies here.

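  // Conservative register-based check: A and B can only be reordered if no
  // register operand of one overlaps a register operand of the other while at
  // least one of the two operands is a def (i.e. no RAW, WAR, or WAW hazards
  // between them).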
  for (const auto &MOA : A->operands()) {
    if (!(MOA.isReg() && MOA.getReg()))
      continue;

    Register RegA = MOA.getReg();
    for (const auto &MOB : B->operands()) {
      if (!(MOB.isReg() && MOB.getReg()))
        continue;

      Register RegB = MOB.getReg();

      if (TRI->regsOverlap(RegA, RegB) && (MOA.isDef() || MOB.isDef()))
        return false;
    }
  }

  return true;
}

bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getRegInfo().getTargetRegisterInfo();
  MFI = &MF.getFrameInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  SmallVector<NullCheck, 16> NullCheckList;

  for (auto &MBB : MF)
    analyzeBlockForNullChecks(MBB, NullCheckList);

  if (!NullCheckList.empty())
    rewriteNullChecks(NullCheckList);

  return !NullCheckList.empty();
}

// Return true if any register aliasing \p Reg is live-in into \p MBB.
static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,
                           MachineBasicBlock *MBB, unsigned Reg) {
  for (MCRegAliasIterator AR(Reg, TRI, /*IncludeSelf*/ true); AR.isValid();
       ++AR)
    if (MBB->isLiveIn(*AR))
      return true;
  return false;
}

ImplicitNullChecks::AliasResult
ImplicitNullChecks::areMemoryOpsAliased(const MachineInstr &MI,
                                        const MachineInstr *PrevMI) const {
  // If PrevMI is not a memory access, there is nothing to check against.
  if (!(PrevMI->mayStore() || PrevMI->mayLoad()))
    return AR_NoAlias;
  // A load-load pair is harmless even if the locations alias.
  if (!(MI.mayStore() || PrevMI->mayStore()))
    return AR_NoAlias;
  // Without memory operand info we must conservatively assume aliasing. If
  // the unknown access is a store, there is also no point in continuing,
  // since we won't be able to check anything against it further on.
  if (MI.memoperands_empty())
    return MI.mayStore() ? AR_WillAliasEverything : AR_MayAlias;
  if (PrevMI->memoperands_empty())
    return PrevMI->mayStore() ? AR_WillAliasEverything : AR_MayAlias;

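  // Both instructions carry memory operand info, so query alias analysis
  // pairwise. Pseudo source values (e.g. stack or constant pool accesses)
  // have no IR Value to hand to AA, so they are handled via
  // PseudoSourceValue::mayAlias instead.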
  for (MachineMemOperand *MMO1 : MI.memoperands()) {
    // MMO1 should have a value, because it comes from the operation we'd like
    // to use as the implicit null check.
    assert(MMO1->getValue() && "MMO1 should have a Value!");
    for (MachineMemOperand *MMO2 : PrevMI->memoperands()) {
      if (const PseudoSourceValue *PSV = MMO2->getPseudoValue()) {
        if (PSV->mayAlias(MFI))
          return AR_MayAlias;
        continue;
      }
      llvm::AliasResult AAResult = AA->alias(
          MemoryLocation::getAfter(MMO1->getValue(), MMO1->getAAInfo()),
          MemoryLocation::getAfter(MMO2->getValue(), MMO2->getAAInfo()));
      if (AAResult != NoAlias)
        return AR_MayAlias;
    }
  }
  return AR_NoAlias;
}

ImplicitNullChecks::SuitabilityResult
ImplicitNullChecks::isSuitableMemoryOp(const MachineInstr &MI,
                                       unsigned PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts) {
  // Implementation restriction for faulting_op insertion
  // TODO: This could be relaxed if we find a test case which warrants it.
  if (MI.getDesc().getNumDefs() > 1)
    return SR_Unsuitable;

  if (!MI.mayLoadOrStore() || MI.isPredicable())
    return SR_Unsuitable;
  auto AM = TII->getAddrModeFromMemoryOp(MI, TRI);
  if (!AM)
    return SR_Unsuitable;
  auto AddrMode = *AM;
  const Register BaseReg = AddrMode.BaseReg, ScaledReg = AddrMode.ScaledReg;
  int64_t Displacement = AddrMode.Displacement;

  // We need the base of the memory instruction to be the same as the register
  // where the null check is performed (i.e. PointerReg).
  if (BaseReg != PointerReg && ScaledReg != PointerReg)
    return SR_Unsuitable;
  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  unsigned PointerRegSizeInBits = TRI->getRegSizeInBits(PointerReg, MRI);
  // Bail out if the sizes of BaseReg, ScaledReg and PointerReg are not the
  // same.
  if ((BaseReg &&
       TRI->getRegSizeInBits(BaseReg, MRI) != PointerRegSizeInBits) ||
      (ScaledReg &&
       TRI->getRegSizeInBits(ScaledReg, MRI) != PointerRegSizeInBits))
    return SR_Unsuitable;

  // Returns true if RegUsedInAddr holds a constant value whose effect can be
  // folded into the displacement; if so, it also updates Displacement
  // accordingly.
  auto CalculateDisplacementFromAddrMode = [&](Register RegUsedInAddr,
                                               int64_t Multiplier) {
    // The register can be NoRegister, which is defined as zero for all
    // targets. Consider an instruction of interest such as
    // `movq 8(,%rdi,8), %rax`: here the ScaledReg is %rdi, while there is no
    // BaseReg.
    if (!RegUsedInAddr)
      return false;
    assert(Multiplier && "expected to be non-zero!");
    MachineInstr *ModifyingMI = nullptr;
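    // Walk backwards from MI to find the nearest preceding instruction in
    // this block that modifies RegUsedInAddr; only that definition is live at
    // MI.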
    for (auto It = std::next(MachineBasicBlock::const_reverse_iterator(&MI));
         It != MI.getParent()->rend(); It++) {
      const MachineInstr *CurrMI = &*It;
      if (CurrMI->modifiesRegister(RegUsedInAddr, TRI)) {
        ModifyingMI = const_cast<MachineInstr *>(CurrMI);
        break;
      }
    }
    if (!ModifyingMI)
      return false;
    // Check for the constant value defined in the register by ModifyingMI.
    // This means all other previous values for that register have been
    // invalidated.
    int64_t ImmVal;
    if (!TII->getConstValDefinedInReg(*ModifyingMI, RegUsedInAddr, ImmVal))
      return false;
    // Calculate the reg size in bits, since this is needed for bailing out in
    // case of overflow.
    int32_t RegSizeInBits = TRI->getRegSizeInBits(RegUsedInAddr, MRI);
    APInt ImmValC(RegSizeInBits, ImmVal, true /*IsSigned*/);
    APInt MultiplierC(RegSizeInBits, Multiplier);
    assert(MultiplierC.isStrictlyPositive() &&
           "expected to be a positive value!");
    bool IsOverflow;
    // The sign of the product depends on the sign of ImmVal, since Multiplier
    // is always positive.
    APInt Product = ImmValC.smul_ov(MultiplierC, IsOverflow);
    if (IsOverflow)
      return false;
    APInt DisplacementC(64, Displacement, true /*isSigned*/);
    DisplacementC = Product.sadd_ov(DisplacementC, IsOverflow);
    if (IsOverflow)
      return false;

    // We only handle displacements up to 64 bits wide.
    if (DisplacementC.getActiveBits() > 64)
      return false;
    Displacement = DisplacementC.getSExtValue();
    return true;
  };

  // If a register used in the address is constant, fold its effect into the
  // displacement for ease of analysis.
  bool BaseRegIsConstVal = false, ScaledRegIsConstVal = false;
  if (CalculateDisplacementFromAddrMode(BaseReg, 1))
    BaseRegIsConstVal = true;
  if (CalculateDisplacementFromAddrMode(ScaledReg, AddrMode.Scale))
    ScaledRegIsConstVal = true;

  // The register which is not null checked should be part of the displacement
  // calculation; otherwise we do not know whether the displacement is made up
  // of some symbolic values.
  // This matters because we do not want to incorrectly assume that the load
  // falls in the zeroth faulting page in the "sane offset check" below.
  if ((BaseReg && BaseReg != PointerReg && !BaseRegIsConstVal) ||
      (ScaledReg && ScaledReg != PointerReg && !ScaledRegIsConstVal))
    return SR_Unsuitable;

  // We want the mem access to be issued at a sane offset from PointerReg,
  // so that if PointerReg is null then the access reliably page faults.
  if (!(-PageSize < Displacement && Displacement < PageSize))
    return SR_Unsuitable;

  // Finally, check whether the current memory access aliases with a previous
  // one.
  for (auto *PrevMI : PrevInsts) {
    AliasResult AR = areMemoryOpsAliased(MI, PrevMI);
    if (AR == AR_WillAliasEverything)
      return SR_Impossible;
    if (AR == AR_MayAlias)
      return SR_Unsuitable;
  }
  return SR_Suitable;
}

bool ImplicitNullChecks::canDependenceHoistingClobberLiveIns(
    MachineInstr *DependenceMI, MachineBasicBlock *NullSucc) {
  for (const auto &DependenceMO : DependenceMI->operands()) {
    if (!(DependenceMO.isReg() && DependenceMO.getReg()))
      continue;

    // Make sure that we won't clobber any live ins to the sibling block by
    // hoisting Dependency. For instance, we can't hoist INST to before the
    // null check (even if it is safe, and does not violate any dependencies
    // in the non_null_block) if %rdx is live in to _null_block.
    //
    //    test %rcx, %rcx
    //    je _null_block
    //  _non_null_block:
    //    %rdx = INST
    //    ...
    //
    // This restriction does not apply to the faulting load inst because in
    // case the pointer loaded from is in the null page, the load will not
    // semantically execute, and so will not affect machine state. That is,
    // if the load was loading into %rax and it faults, the value of %rax
    // should stay the same as it would have been had the load not executed,
    // and we'd have branched to NullSucc directly.
    if (AnyAliasLiveIn(TRI, NullSucc, DependenceMO.getReg()))
      return true;
  }

  // The dependence does not clobber live-ins in NullSucc block.
  return false;
}

bool ImplicitNullChecks::canHoistInst(MachineInstr *FaultingMI,
                                      ArrayRef<MachineInstr *> InstsSeenSoFar,
                                      MachineBasicBlock *NullSucc,
                                      MachineInstr *&Dependence) {
  auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
  if (!DepResult.CanReorder)
    return false;

  if (!DepResult.PotentialDependence) {
    Dependence = nullptr;
    return true;
  }

  auto DependenceItr = *DepResult.PotentialDependence;
  auto *DependenceMI = *DependenceItr;

  // We don't want to reason about speculating loads. Note -- at this point
  // we should have already filtered out all of the other non-speculatable
  // things, like calls and stores.
  // We also do not want to hoist stores because it might change the memory
  // while the FaultingMI may result in faulting.
  assert(canHandle(DependenceMI) && "Should never have reached here!");
  if (DependenceMI->mayLoadOrStore())
    return false;

  if (canDependenceHoistingClobberLiveIns(DependenceMI, NullSucc))
    return false;

  auto DepDepResult =
      computeDependence(DependenceMI, {InstsSeenSoFar.begin(), DependenceItr});

  if (!DepDepResult.CanReorder || DepDepResult.PotentialDependence)
    return false;

  Dependence = DependenceMI;
  return true;
}

/// Analyze MBB to check if its terminating branch can be turned into an
/// implicit null check. If yes, append a description of the said null check
/// to NullCheckList and return true, else return false.
bool ImplicitNullChecks::analyzeBlockForNullChecks(
    MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
  using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate;

  MDNode *BranchMD = nullptr;
  if (auto *BB = MBB.getBasicBlock())
    BranchMD = BB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit);

  if (!BranchMD)
    return false;

  MachineBranchPredicate MBP;

  if (TII->analyzeBranchPredicate(MBB, MBP, true))
    return false;

  // Is the predicate comparing an integer to zero?
  if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
        (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
         MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
    return false;

  // If there is a separate condition-generating instruction, we choose not to
  // transform unless we can remove both the condition and the consuming
  // branch.
  if (MBP.ConditionDef && !MBP.SingleUseCondition)
    return false;

  MachineBasicBlock *NotNullSucc, *NullSucc;

  if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
    NotNullSucc = MBP.TrueDest;
    NullSucc = MBP.FalseDest;
  } else {
    NotNullSucc = MBP.FalseDest;
    NullSucc = MBP.TrueDest;
  }

  // We handle the simplest case for now. We can potentially do better by
  // using the machine dominator tree.
  if (NotNullSucc->pred_size() != 1)
    return false;

  const Register PointerReg = MBP.LHS.getReg();

  if (MBP.ConditionDef) {
    // To prevent the invalid transformation of the following code:
    //
    //   mov %rax, %rcx
    //   test %rax, %rax
    //   %rax = ...
    //   je throw_npe
    //   mov (%rcx), %r9
    //   mov (%rax), %r10
    //
    // into:
    //
    //   mov %rax, %rcx
    //   %rax = ....
    //   faulting_load_op("movl (%rax), %r10", throw_npe)
    //   mov (%rcx), %r9
    //
    // we must ensure that there are no instructions between the 'test' and
    // conditional jump that modify %rax.
    assert(MBP.ConditionDef->getParent() == &MBB &&
           "Should be in basic block");

    for (auto I = MBB.rbegin(); MBP.ConditionDef != &*I; ++I)
      if (I->modifiesRegister(PointerReg, TRI))
        return false;
  }
  // Starting with a code fragment like:
  //
  //   test %rax, %rax
  //   jne LblNotNull
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //   Def = Load (%rax + <offset>)
  //   ...
  //
  //
  // we want to end up with
  //
  //   Def = FaultingLoad (%rax + <offset>), LblNull
  //   jmp LblNotNull ;; explicit or fallthrough
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //
  // To see why this is legal, consider the two possibilities:
  //
  //  1. %rax is null: since we constrain <offset> to be less than PageSize,
  //     the load instruction dereferences the null page, causing a
  //     segmentation fault.
  //
  //  2. %rax is not null: in this case we know that the load cannot fault, as
  //     otherwise the load would've faulted in the original program too and
  //     the original program would've been undefined.
  //
  // This reasoning cannot be extended to justify hoisting through arbitrary
  // control flow. For instance, in the example below (in pseudo-C)
  //
  //    if (ptr == null) { throw_npe(); unreachable; }
  //    if (some_cond) { return 42; }
  //    v = ptr->field; // LD
  //    ...
  //
  // we cannot (without code duplication) use the load marked "LD" to null
  // check ptr -- clause (2) above does not apply in this case. In the above
  // program the safety of ptr->field can be dependent on some_cond; and, for
  // instance, ptr could be some non-null invalid reference that never gets
  // loaded from because some_cond is always true.

  SmallVector<MachineInstr *, 8> InstsSeenSoFar;

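  // Scan forward through NotNullSucc for a memory operation that can absorb
  // the null check, remembering every instruction seen on the way; give up as
  // soon as we hit something we cannot reason about.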
  for (auto &MI : *NotNullSucc) {
    if (!canHandle(&MI) || InstsSeenSoFar.size() >= MaxInstsToConsider)
      return false;

    MachineInstr *Dependence;
    SuitabilityResult SR = isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar);
    if (SR == SR_Impossible)
      return false;
    if (SR == SR_Suitable &&
        canHoistInst(&MI, InstsSeenSoFar, NullSucc, Dependence)) {
      NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
                                 NullSucc, Dependence);
      return true;
    }

    // If MI re-defines the PointerReg in a way that changes the value of
    // PointerReg if it was null, then we cannot move further.
    if (!TII->preservesZeroValueInReg(&MI, PointerReg, TRI))
      return false;
    InstsSeenSoFar.push_back(&MI);
  }

  return false;
}

/// Wrap a machine instruction, MI, into a FAULTING machine instruction.
/// The FAULTING instruction does the same load/store as MI
/// (defining the same register), and branches to HandlerMBB if the mem access
/// faults. The FAULTING instruction is inserted at the end of MBB.
MachineInstr *ImplicitNullChecks::insertFaultingInstr(
    MachineInstr *MI, MachineBasicBlock *MBB, MachineBasicBlock *HandlerMBB) {
  const unsigned NoRegister = 0; // Guaranteed to be the NoRegister value for
                                 // all targets.

  DebugLoc DL;
  unsigned NumDefs = MI->getDesc().getNumDefs();
  assert(NumDefs <= 1 && "other cases unhandled!");

  unsigned DefReg = NoRegister;
  if (NumDefs != 0) {
    DefReg = MI->getOperand(0).getReg();
    assert(NumDefs == 1 && "expected exactly one def!");
  }

  FaultMaps::FaultKind FK;
  if (MI->mayLoad())
    FK =
        MI->mayStore() ? FaultMaps::FaultingLoadStore : FaultMaps::FaultingLoad;
  else
    FK = FaultMaps::FaultingStore;

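  // A FAULTING_OP pseudo packs the original instruction: operand 0 is the def
  // (possibly NoRegister), followed by the fault kind, the fault handler
  // block, the original opcode, and then the original instruction's operands.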
  auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_OP), DefReg)
                 .addImm(FK)
                 .addMBB(HandlerMBB)
                 .addImm(MI->getOpcode());

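  // Transfer the remaining operands, clearing kill and dead flags: the
  // operands' positions relative to other instructions change, so the old
  // flags may no longer be accurate.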
  for (auto &MO : MI->uses()) {
    if (MO.isReg()) {
      MachineOperand NewMO = MO;
      if (MO.isUse()) {
        NewMO.setIsKill(false);
      } else {
        assert(MO.isDef() && "Expected def or use");
        NewMO.setIsDead(false);
      }
      MIB.add(NewMO);
    } else {
      MIB.add(MO);
    }
  }

  MIB.setMemRefs(MI->memoperands());

  return MIB;
}

/// Rewrite the null checks in NullCheckList into implicit null checks.
void ImplicitNullChecks::rewriteNullChecks(
    ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) {
  DebugLoc DL;

  for (auto &NC : NullCheckList) {
    // Remove the conditional branch dependent on the null check.
    unsigned BranchesRemoved = TII->removeBranch(*NC.getCheckBlock());
    (void)BranchesRemoved;
    assert(BranchesRemoved > 0 && "expected at least one branch!");

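    // If the memory operation has a single hoistable dependency, move it into
    // the check block so that it executes before the faulting instruction.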
    if (auto *DepMI = NC.getOnlyDependency()) {
      DepMI->removeFromParent();
      NC.getCheckBlock()->insert(NC.getCheckBlock()->end(), DepMI);
    }

    // Insert a faulting instruction where the conditional branch was
    // originally. The checks done earlier ensure that this bit of code
    // motion is legal. We do not touch the successors list for any basic
    // block since we haven't changed control flow, we've just made it
    // implicit.
    MachineInstr *FaultingInstr = insertFaultingInstr(
        NC.getMemOperation(), NC.getCheckBlock(), NC.getNullSucc());
    // Now the values defined by MemOperation, if any, are live-in of
    // the block of MemOperation.
    // The original operation may define implicit-defs alongside
    // the value.
    MachineBasicBlock *MBB = NC.getMemOperation()->getParent();
    for (const MachineOperand &MO : FaultingInstr->operands()) {
      if (!MO.isReg() || !MO.isDef())
        continue;
      Register Reg = MO.getReg();
      if (!Reg || MBB->isLiveIn(Reg))
        continue;
      MBB->addLiveIn(Reg);
    }

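    // Likewise, any register defined by the hoisted dependency is now live-in
    // to the not-null successor.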
    if (auto *DepMI = NC.getOnlyDependency()) {
      for (auto &MO : DepMI->operands()) {
        if (!MO.isReg() || !MO.getReg() || !MO.isDef() || MO.isDead())
          continue;
        if (!NC.getNotNullSucc()->isLiveIn(MO.getReg()))
          NC.getNotNullSucc()->addLiveIn(MO.getReg());
      }
    }

    NC.getMemOperation()->eraseFromParent();
    if (auto *CheckOp = NC.getCheckOperation())
      CheckOp->eraseFromParent();

    // Insert an *unconditional* branch to the not-null successor - we expect
    // block placement to remove fallthroughs later.
    TII->insertBranch(*NC.getCheckBlock(), NC.getNotNullSucc(), nullptr,
                      /*Cond=*/None, DL);

    NumImplicitNullChecks++;
  }
}

char ImplicitNullChecks::ID = 0;

char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;

INITIALIZE_PASS_BEGIN(ImplicitNullChecks, DEBUG_TYPE,
                      "Implicit null checks", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(ImplicitNullChecks, DEBUG_TYPE,
                    "Implicit null checks", false, false)