InstCombineLoadStoreAlloca.cpp
1//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the visit functions for load, store and alloca.
10//
11//===----------------------------------------------------------------------===//
12
13#include "InstCombineInternal.h"
14#include "llvm/ADT/MapVector.h"
15#include "llvm/ADT/SmallString.h"
16#include "llvm/ADT/Statistic.h"
17#include "llvm/Analysis/AliasAnalysis.h"
18#include "llvm/Analysis/Loads.h"
19#include "llvm/IR/DataLayout.h"
20#include "llvm/IR/IntrinsicInst.h"
21#include "llvm/IR/LLVMContext.h"
22#include "llvm/IR/PatternMatch.h"
23#include "llvm/Transforms/InstCombine/InstCombiner.h"
24#include "llvm/Transforms/Utils/Local.h"
25using namespace llvm;
26using namespace PatternMatch;
27
28#define DEBUG_TYPE "instcombine"
29
30namespace llvm {
31extern cl::opt<bool> ProfcheckDisableMetadataFixes;
32}
33
34STATISTIC(NumDeadStore, "Number of dead stores eliminated");
35STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
36
38 "instcombine-max-copied-from-constant-users", cl::init(300),
39 cl::desc("Maximum users to visit in copy from constant transform"),
41
42/// isOnlyCopiedFromConstantMemory - Recursively walk the uses of a (derived)
43/// pointer to an alloca. Ignore any reads of the pointer, return false if we
44/// see any stores or other unknown uses. If we see pointer arithmetic, keep
45/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
46/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to
47/// the alloca, and if the source pointer is a pointer to a constant memory
48/// location, we can optimize this.
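///
/// Illustrative (hypothetical) IR for the pattern this routine looks for:
///
///   %a = alloca [4 x i32]
///   call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr @cst, i64 16, i1 false)
///   %v = load i32, ptr %a
///
/// Assuming @cst is a constant global of at least 16 bytes, every use of %a
/// other than the memcpy is a read, so reads of %a can be redirected to @cst.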
49static bool
50isOnlyCopiedFromConstantMemory(AAResults *AA, Value *V,
51 MemTransferInst *&TheCopy,
52 SmallVectorImpl<Instruction *> &ToDelete) {
53 // We track lifetime intrinsics as we encounter them. If we decide to go
54 // ahead and replace the value with the memory location, this lets the caller
55 // quickly eliminate the markers.
56
57 using ValueAndIsOffset = PointerIntPair<Value *, 1, bool>;
58 SmallVector<ValueAndIsOffset, 32> Worklist;
59 SmallPtrSet<ValueAndIsOffset, 32> Visited;
60 Worklist.emplace_back(V, false);
61 while (!Worklist.empty()) {
62 ValueAndIsOffset Elem = Worklist.pop_back_val();
63 if (!Visited.insert(Elem).second)
64 continue;
65 if (Visited.size() > MaxCopiedFromConstantUsers)
66 return false;
67
68 const auto [Value, IsOffset] = Elem;
69 for (auto &U : Value->uses()) {
70 auto *I = cast<Instruction>(U.getUser());
71
72 if (auto *LI = dyn_cast<LoadInst>(I)) {
73 // Ignore non-volatile loads, they are always ok.
74 if (!LI->isSimple()) return false;
75 continue;
76 }
77
78 if (isa<PHINode>(I)) {
79 // We set IsOffset=true, to forbid the memcpy from occurring after the
80 // phi: If one of the phi operands is not based on the alloca, we
81 // would incorrectly omit a write.
82 Worklist.emplace_back(I, true);
83 continue;
84 }
85 if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
86 // If uses of the bitcast are ok, we are ok.
87 Worklist.emplace_back(I, IsOffset);
88 continue;
89 }
90 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
91 // If the GEP has all zero indices, it doesn't offset the pointer. If it
92 // doesn't, it does.
93 Worklist.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
94 continue;
95 }
96
97 if (auto *Call = dyn_cast<CallBase>(I)) {
98 // If this is the function being called then we treat it like a load and
99 // ignore it.
100 if (Call->isCallee(&U))
101 continue;
102
103 unsigned DataOpNo = Call->getDataOperandNo(&U);
104 bool IsArgOperand = Call->isArgOperand(&U);
105
106 // Inalloca arguments are clobbered by the call.
107 if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
108 return false;
109
110 // If this call site doesn't modify the memory, then we know it is just
111 // a load (but one that potentially returns the value itself), so we can
112 // ignore it if we know that the value isn't captured.
113 bool NoCapture = Call->doesNotCapture(DataOpNo);
114 if (NoCapture &&
115 (Call->onlyReadsMemory() || Call->onlyReadsMemory(DataOpNo)))
116 continue;
117 }
118
119 // Lifetime intrinsics can be handled by the caller.
120 if (I->isLifetimeStartOrEnd()) {
121 assert(I->use_empty() && "Lifetime markers have no result to use!");
122 ToDelete.push_back(I);
123 continue;
124 }
125
126 // If this isn't our memcpy/memmove, reject it as something we can't
127 // handle.
128 MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
129 if (!MI)
130 return false;
131
132 // If the transfer is volatile, reject it.
133 if (MI->isVolatile())
134 return false;
135
136 // If the transfer is using the alloca as a source of the transfer, then
137 // ignore it since it is a load (unless the transfer is volatile).
138 if (U.getOperandNo() == 1)
139 continue;
140
141 // If we already have seen a copy, reject the second one.
142 if (TheCopy) return false;
143
144 // If the pointer has been offset from the start of the alloca, we can't
145 // safely handle this.
146 if (IsOffset) return false;
147
148 // If the memintrinsic isn't using the alloca as the dest, reject it.
149 if (U.getOperandNo() != 0) return false;
150
151 // If the source of the memcpy/move is not constant, reject it.
152 if (isModSet(AA->getModRefInfoMask(MI->getSource())))
153 return false;
154
155 // Otherwise, the transform is safe. Remember the copy instruction.
156 TheCopy = MI;
157 }
158 }
159 return true;
160}
161
162/// isOnlyCopiedFromConstantMemory - Return the memcpy/memmove that initializes
163/// the specified alloca if the alloca is only modified by such a copy from a
164/// constant memory location. If we can prove this, we can replace any uses of
165/// the alloca with uses of the memory location directly.
166static MemTransferInst *
167isOnlyCopiedFromConstantMemory(AAResults *AA,
168 AllocaInst *AI,
169 SmallVectorImpl<Instruction *> &ToDelete) {
170 MemTransferInst *TheCopy = nullptr;
171 if (isOnlyCopiedFromConstantMemory(AA, AI, TheCopy, ToDelete))
172 return TheCopy;
173 return nullptr;
174}
175
176/// Returns true if V is dereferenceable for the size of the alloca.
177static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
178 const DataLayout &DL) {
179 std::optional<TypeSize> AllocaSize = AI->getAllocationSize(DL);
180 if (!AllocaSize || AllocaSize->isScalable())
181 return false;
182 return isDereferenceableAndAlignedPointer(V, AI->getAlign(),
183 APInt(64, *AllocaSize), DL);
184}
185
185
186static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC,
187 AllocaInst &AI, DominatorTree &DT) {
188 // Check for array size of 1 (scalar allocation).
189 if (!AI.isArrayAllocation()) {
190 // i32 1 is the canonical array size for scalar allocations.
191 if (AI.getArraySize()->getType()->isIntegerTy(32))
192 return nullptr;
193
194 // Canonicalize it.
195 return IC.replaceOperand(AI, 0, IC.Builder.getInt32(1));
196 }
197
198 // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
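 // For example (illustrative): "%a = alloca i32, i32 8" becomes
 // "%a = alloca [8 x i32]" with the canonical array size of 1.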
199 if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
200 if (C->getValue().getActiveBits() <= 64) {
201 Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
202 AllocaInst *New = IC.Builder.CreateAlloca(NewTy, AI.getAddressSpace(),
203 nullptr, AI.getName());
204 New->setAlignment(AI.getAlign());
205 New->setUsedWithInAlloca(AI.isUsedWithInAlloca());
206
207 replaceAllDbgUsesWith(AI, *New, *New, DT);
208 return IC.replaceInstUsesWith(AI, New);
209 }
210 }
211
214
215 // Ensure that the alloca array size argument has type equal to the offset
216 // size of the alloca() pointer, which, in the typical case, is intptr_t,
217 // so that any casting is exposed early.
218 Type *PtrIdxTy = IC.getDataLayout().getIndexType(AI.getType());
219 if (AI.getArraySize()->getType() != PtrIdxTy) {
220 Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), PtrIdxTy, false);
221 return IC.replaceOperand(AI, 0, V);
222 }
223
224 return nullptr;
225}
226
227namespace {
228// If I and V are pointers in different address spaces, it is not allowed to
229// use replaceAllUsesWith since I and V have different types. A
230// non-target-specific transformation should not use addrspacecast on V since
231// the two address spaces may be disjoint depending on the target.
232//
233// This class chases down uses of the old pointer until reaching the load
234// instructions, then replaces the old pointer in the load instructions with
235// the new pointer. If during the chasing it sees a bitcast or GEP, it will
236// create a new bitcast or GEP with the new pointer and use them in the load
237// instruction.
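//
// A rough example of the rewrite (the address spaces are hypothetical):
//
//   %a = alloca i32, addrspace(5)                    ; only initialized by a
//   %p = getelementptr i32, ptr addrspace(5) %a, i64 0  ; copy from @g in AS 4
//   %v = load i32, ptr addrspace(5) %p
//
// is rewritten so that the GEP and the load are recreated directly on
// "ptr addrspace(4) @g", inserting an addrspacecast only where the target
// says such a cast is valid.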
238class PointerReplacer {
239public:
240 PointerReplacer(InstCombinerImpl &IC, Instruction &Root, unsigned SrcAS)
241 : IC(IC), Root(Root), FromAS(SrcAS) {}
242
243 bool collectUsers();
244 void replacePointer(Value *V);
245
246private:
247 void replace(Instruction *I);
248 Value *getReplacement(Value *V) const { return WorkMap.lookup(V); }
249 bool isAvailable(Instruction *I) const {
250 return I == &Root || UsersToReplace.contains(I);
251 }
252
253 bool isEqualOrValidAddrSpaceCast(const Instruction *I,
254 unsigned FromAS) const {
255 const auto *ASC = dyn_cast<AddrSpaceCastInst>(I);
256 if (!ASC)
257 return false;
258 unsigned ToAS = ASC->getDestAddressSpace();
259 return (FromAS == ToAS) || IC.isValidAddrSpaceCast(FromAS, ToAS);
260 }
261
262 SmallSetVector<Instruction *, 32> UsersToReplace;
263 MapVector<Value *, Value *> WorkMap;
264 InstCombinerImpl &IC;
265 Instruction &Root;
266 unsigned FromAS;
267};
268} // end anonymous namespace
269
270bool PointerReplacer::collectUsers() {
271 SmallVector<Instruction *> Worklist;
272 SmallSetVector<Instruction *, 32> ValuesToRevisit;
273
274 auto PushUsersToWorklist = [&](Instruction *Inst) {
275 for (auto *U : Inst->users())
276 if (auto *I = dyn_cast<Instruction>(U))
277 if (!isAvailable(I) && !ValuesToRevisit.contains(I))
278 Worklist.emplace_back(I);
279 };
280
281 auto TryPushInstOperand = [&](Instruction *InstOp) {
282 if (!UsersToReplace.contains(InstOp)) {
283 if (!ValuesToRevisit.insert(InstOp))
284 return false;
285 Worklist.emplace_back(InstOp);
286 }
287 return true;
288 };
289
290 PushUsersToWorklist(&Root);
291 while (!Worklist.empty()) {
292 Instruction *Inst = Worklist.pop_back_val();
293 if (auto *Load = dyn_cast<LoadInst>(Inst)) {
294 if (Load->isVolatile())
295 return false;
296 UsersToReplace.insert(Load);
297 } else if (auto *PHI = dyn_cast<PHINode>(Inst)) {
298 /// TODO: Handle poison and null pointers for PHI and select.
299 // If all incoming values are available, mark this PHI as
300 // replaceable and push its users into the worklist.
301 bool IsReplaceable = all_of(PHI->incoming_values(),
302 [](Value *V) { return isa<Instruction>(V); });
303 if (IsReplaceable && all_of(PHI->incoming_values(), [&](Value *V) {
304 return isAvailable(cast<Instruction>(V));
305 })) {
306 UsersToReplace.insert(PHI);
307 PushUsersToWorklist(PHI);
308 continue;
309 }
310
311 // Either an incoming value is not an instruction or not all
312 // incoming values are available. If this PHI was already
313 // visited prior to this iteration, return false.
314 if (!IsReplaceable || !ValuesToRevisit.insert(PHI))
315 return false;
316
317 // Push PHI back into the stack, followed by unavailable
318 // incoming values.
319 Worklist.emplace_back(PHI);
320 for (unsigned Idx = 0; Idx < PHI->getNumIncomingValues(); ++Idx) {
321 if (!TryPushInstOperand(cast<Instruction>(PHI->getIncomingValue(Idx))))
322 return false;
323 }
324 } else if (auto *SI = dyn_cast<SelectInst>(Inst)) {
325 auto *TrueInst = dyn_cast<Instruction>(SI->getTrueValue());
326 auto *FalseInst = dyn_cast<Instruction>(SI->getFalseValue());
327 if (!TrueInst || !FalseInst)
328 return false;
329
330 if (isAvailable(TrueInst) && isAvailable(FalseInst)) {
331 UsersToReplace.insert(SI);
332 PushUsersToWorklist(SI);
333 continue;
334 }
335
336 // Push select back onto the stack, followed by unavailable true/false
337 // value.
338 Worklist.emplace_back(SI);
339 if (!TryPushInstOperand(TrueInst) || !TryPushInstOperand(FalseInst))
340 return false;
341 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
342 auto *PtrOp = dyn_cast<Instruction>(GEP->getPointerOperand());
343 if (!PtrOp)
344 return false;
345 if (isAvailable(PtrOp)) {
346 UsersToReplace.insert(GEP);
347 PushUsersToWorklist(GEP);
348 continue;
349 }
350
351 Worklist.emplace_back(GEP);
352 if (!TryPushInstOperand(PtrOp))
353 return false;
354 } else if (auto *MI = dyn_cast<MemTransferInst>(Inst)) {
355 if (MI->isVolatile())
356 return false;
357 UsersToReplace.insert(Inst);
358 } else if (isEqualOrValidAddrSpaceCast(Inst, FromAS)) {
359 UsersToReplace.insert(Inst);
360 PushUsersToWorklist(Inst);
361 } else if (Inst->isLifetimeStartOrEnd()) {
362 continue;
363 } else {
364 // TODO: For arbitrary uses with address space mismatches, should we check
365 // if we can introduce a valid addrspacecast?
366 LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *Inst << '\n');
367 return false;
368 }
369 }
370
371 return true;
372}
373
374void PointerReplacer::replacePointer(Value *V) {
375 assert(cast<PointerType>(Root.getType()) != cast<PointerType>(V->getType()) &&
376 "Invalid usage");
377 WorkMap[&Root] = V;
378 SmallVector<Instruction *> Worklist;
379 SetVector<Instruction *> PostOrderWorklist;
380 SmallPtrSet<Instruction *, 32> Visited;
381
382 // Perform a postorder traversal of the users of Root.
383 Worklist.push_back(&Root);
384 while (!Worklist.empty()) {
385 Instruction *I = Worklist.back();
386
387 // If I has not been processed before, push each of its
388 // replaceable users into the worklist.
389 if (Visited.insert(I).second) {
390 for (auto *U : I->users()) {
391 auto *UserInst = cast<Instruction>(U);
392 if (UsersToReplace.contains(UserInst) && !Visited.contains(UserInst))
393 Worklist.push_back(UserInst);
394 }
395 // Otherwise, users of I have already been pushed into
396 // the PostOrderWorklist. Push I as well.
397 } else {
398 PostOrderWorklist.insert(I);
399 Worklist.pop_back();
400 }
401 }
402
403 // Replace pointers in reverse-postorder.
404 for (Instruction *I : reverse(PostOrderWorklist))
405 replace(I);
406}
407
408void PointerReplacer::replace(Instruction *I) {
409 if (getReplacement(I))
410 return;
411
412 if (auto *LT = dyn_cast<LoadInst>(I)) {
413 auto *V = getReplacement(LT->getPointerOperand());
414 assert(V && "Operand not replaced");
415 auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),
416 LT->getAlign(), LT->getOrdering(),
417 LT->getSyncScopeID());
418 NewI->takeName(LT);
419 copyMetadataForLoad(*NewI, *LT);
420
421 IC.InsertNewInstWith(NewI, LT->getIterator());
422 IC.replaceInstUsesWith(*LT, NewI);
423 // LT has actually been replaced by NewI. It is useless to insert LT into
424 // the map. Instead, we insert NewI into the map to indicate this is the
425 // replacement (new value).
426 WorkMap[NewI] = NewI;
427 } else if (auto *PHI = dyn_cast<PHINode>(I)) {
428 // Create a new PHI by replacing any incoming value that is a user of the
429 // root pointer and has a replacement.
430 Value *V = WorkMap.lookup(PHI->getIncomingValue(0));
431 PHI->mutateType(V ? V->getType() : PHI->getIncomingValue(0)->getType());
432 for (unsigned int I = 0; I < PHI->getNumIncomingValues(); ++I) {
433 Value *V = WorkMap.lookup(PHI->getIncomingValue(I));
434 PHI->setIncomingValue(I, V ? V : PHI->getIncomingValue(I));
435 }
436 WorkMap[PHI] = PHI;
437 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
438 auto *V = getReplacement(GEP->getPointerOperand());
439 assert(V && "Operand not replaced");
440 SmallVector<Value *, 8> Indices(GEP->indices());
441 auto *NewI =
442 GetElementPtrInst::Create(GEP->getSourceElementType(), V, Indices);
443 IC.InsertNewInstWith(NewI, GEP->getIterator());
444 NewI->takeName(GEP);
445 NewI->setNoWrapFlags(GEP->getNoWrapFlags());
446 WorkMap[GEP] = NewI;
447 } else if (auto *SI = dyn_cast<SelectInst>(I)) {
448 Value *TrueValue = SI->getTrueValue();
449 Value *FalseValue = SI->getFalseValue();
450 if (Value *Replacement = getReplacement(TrueValue))
451 TrueValue = Replacement;
452 if (Value *Replacement = getReplacement(FalseValue))
453 FalseValue = Replacement;
454 auto *NewSI = SelectInst::Create(SI->getCondition(), TrueValue, FalseValue,
455 SI->getName(), nullptr, SI);
456 IC.InsertNewInstWith(NewSI, SI->getIterator());
457 NewSI->takeName(SI);
458 WorkMap[SI] = NewSI;
459 } else if (auto *MemCpy = dyn_cast<MemTransferInst>(I)) {
460 auto *DestV = MemCpy->getRawDest();
461 auto *SrcV = MemCpy->getRawSource();
462
463 if (auto *DestReplace = getReplacement(DestV))
464 DestV = DestReplace;
465 if (auto *SrcReplace = getReplacement(SrcV))
466 SrcV = SrcReplace;
467
468 IC.Builder.SetInsertPoint(MemCpy);
469 auto *NewI = IC.Builder.CreateMemTransferInst(
470 MemCpy->getIntrinsicID(), DestV, MemCpy->getDestAlign(), SrcV,
471 MemCpy->getSourceAlign(), MemCpy->getLength(), MemCpy->isVolatile());
472 AAMDNodes AAMD = MemCpy->getAAMetadata();
473 if (AAMD)
474 NewI->setAAMetadata(AAMD);
475
476 IC.eraseInstFromFunction(*MemCpy);
477 WorkMap[MemCpy] = NewI;
478 } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(I)) {
479 auto *V = getReplacement(ASC->getPointerOperand());
480 assert(V && "Operand not replaced");
481 assert(isEqualOrValidAddrSpaceCast(
482 ASC, V->getType()->getPointerAddressSpace()) &&
483 "Invalid address space cast!");
484
485 if (V->getType()->getPointerAddressSpace() !=
486 ASC->getType()->getPointerAddressSpace()) {
487 auto *NewI = new AddrSpaceCastInst(V, ASC->getType(), "");
488 NewI->takeName(ASC);
489 IC.InsertNewInstWith(NewI, ASC->getIterator());
490 WorkMap[ASC] = NewI;
491 } else {
492 WorkMap[ASC] = V;
493 }
494
495 } else {
496 llvm_unreachable("should never reach here");
497 }
498}
499
500Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) {
501 if (auto *I = simplifyAllocaArraySize(*this, AI, DT))
502 return I;
503
504 // Move all allocas of zero byte objects to the entry block and merge them
505 // together. Note that we only do this for allocas, because malloc should
506 // allocate and return a unique pointer, even for a zero byte allocation.
507 std::optional<TypeSize> Size = AI.getAllocationSize(DL);
508 if (Size && Size->isZero()) {
509 // For a zero sized alloca there is no point in doing an array allocation.
510 // This is helpful if the array size is a complicated expression not used
511 // elsewhere.
512 if (AI.isArrayAllocation())
513 return replaceOperand(AI, 0,
514 ConstantInt::get(AI.getArraySize()->getType(), 1));
515
516 // Get the first instruction in the entry block.
517 BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
518 BasicBlock::iterator FirstInst = EntryBlock.getFirstNonPHIOrDbg();
519 if (&*FirstInst != &AI) {
520 // If the entry block doesn't start with a zero-size alloca then move
521 // this one to the start of the entry block. There is no problem with
522 // dominance as the array size was forced to a constant earlier already.
523 AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
524 std::optional<TypeSize> EntryAISize =
525 EntryAI ? EntryAI->getAllocationSize(DL) : std::nullopt;
526 if (!EntryAISize || !EntryAISize->isZero()) {
527 AI.moveBefore(FirstInst);
528 return &AI;
529 }
530
531 // Replace this zero-sized alloca with the one at the start of the entry
532 // block after ensuring that the address will be aligned enough for both
533 // types.
534 const Align MaxAlign = std::max(EntryAI->getAlign(), AI.getAlign());
535 EntryAI->setAlignment(MaxAlign);
536 return replaceInstUsesWith(AI, EntryAI);
537 }
538 }
539
540 // Check to see if this allocation is only modified by a memcpy/memmove from
541 // a memory location whose alignment is equal to or exceeds that of the
542 // allocation. If this is the case, we can change all users to use the
543 // constant memory location instead. This is commonly produced by the CFE by
544 // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
545 // is only subsequently read.
546 SmallVector<Instruction *, 4> ToDelete;
547 if (MemTransferInst *Copy = isOnlyCopiedFromConstantMemory(AA, &AI, ToDelete)) {
548 Value *TheSrc = Copy->getSource();
549 Align AllocaAlign = AI.getAlign();
550 Align SourceAlign = getOrEnforceKnownAlignment(
551 TheSrc, AllocaAlign, DL, &AI, &AC, &DT);
552 if (AllocaAlign <= SourceAlign &&
553 isDereferenceableForAllocaSize(TheSrc, &AI, DL) &&
554 !isa<Instruction>(TheSrc)) {
555 // FIXME: Can we sink instructions without violating dominance when TheSrc
556 // is an instruction instead of a constant or argument?
557 LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
558 LLVM_DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
559 unsigned SrcAddrSpace = TheSrc->getType()->getPointerAddressSpace();
560 if (AI.getAddressSpace() == SrcAddrSpace) {
561 for (Instruction *Delete : ToDelete)
562 eraseInstFromFunction(*Delete);
563
564 Instruction *NewI = replaceInstUsesWith(AI, TheSrc);
565 eraseInstFromFunction(*Copy);
566 ++NumGlobalCopies;
567 return NewI;
568 }
569
570 PointerReplacer PtrReplacer(*this, AI, SrcAddrSpace);
571 if (PtrReplacer.collectUsers()) {
572 for (Instruction *Delete : ToDelete)
573 eraseInstFromFunction(*Delete);
574
575 PtrReplacer.replacePointer(TheSrc);
576 ++NumGlobalCopies;
577 }
578 }
579 }
580
581 // At last, use the generic allocation site handler to aggressively remove
582 // unused allocas.
583 return visitAllocSite(AI);
584}
585
586// Are we allowed to form an atomic load or store of this type?
587static bool isSupportedAtomicType(Type *Ty) {
588 return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
589}
590
591/// Helper to combine a load to a new type.
592///
593/// This just does the work of combining a load to a new type. It handles
594/// metadata, etc., and returns the new instruction. The \c NewTy should be the
595/// loaded *value* type. This will convert it to a pointer, cast the operand to
596/// that pointer type, load it, etc.
597///
598/// Note that this will create all of the instructions with whatever insert
599/// point the \c InstCombinerImpl currently is using.
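///
/// For example (illustrative), a caller may use this to rewrite
///   %x = load i64, ptr %p
/// into
///   %x1 = load double, ptr %p
/// when the loaded value is only consumed as a double; the pointer operand is
/// reused unchanged and only the loaded value type differs.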
600LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
601 const Twine &Suffix) {
602 assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
603 "can't fold an atomic load to requested type");
604
605 LoadInst *NewLoad =
606 Builder.CreateAlignedLoad(NewTy, LI.getPointerOperand(), LI.getAlign(),
607 LI.isVolatile(), LI.getName() + Suffix);
608 NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
609 copyMetadataForLoad(*NewLoad, LI);
610 return NewLoad;
611}
612
613/// Combine a store to a new type.
614///
615/// Returns the newly created store instruction.
616static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI,
617 Value *V) {
618 assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
619 "can't fold an atomic store of requested type");
620
621 Value *Ptr = SI.getPointerOperand();
622 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
623 SI.getAllMetadata(MD);
624
625 StoreInst *NewStore =
626 IC.Builder.CreateAlignedStore(V, Ptr, SI.getAlign(), SI.isVolatile());
627 NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
628 for (const auto &MDPair : MD) {
629 unsigned ID = MDPair.first;
630 MDNode *N = MDPair.second;
631 // Note, essentially every kind of metadata should be preserved here! This
632 // routine is supposed to clone a store instruction changing *only its
633 // type*. The only metadata it makes sense to drop is metadata which is
634 // invalidated when the pointer type changes. This should essentially
635 // never be the case in LLVM, but we explicitly switch over only known
636 // metadata to be conservatively correct. If you are adding metadata to
637 // LLVM which pertains to stores, you almost certainly want to add it
638 // here.
639 switch (ID) {
640 case LLVMContext::MD_dbg:
641 case LLVMContext::MD_DIAssignID:
642 case LLVMContext::MD_tbaa:
643 case LLVMContext::MD_prof:
644 case LLVMContext::MD_fpmath:
645 case LLVMContext::MD_tbaa_struct:
646 case LLVMContext::MD_alias_scope:
647 case LLVMContext::MD_noalias:
648 case LLVMContext::MD_nontemporal:
649 case LLVMContext::MD_mem_parallel_loop_access:
650 case LLVMContext::MD_access_group:
651 // All of these directly apply.
652 NewStore->setMetadata(ID, N);
653 break;
654 case LLVMContext::MD_invariant_load:
655 case LLVMContext::MD_nonnull:
656 case LLVMContext::MD_noundef:
657 case LLVMContext::MD_range:
658 case LLVMContext::MD_align:
659 case LLVMContext::MD_dereferenceable:
660 case LLVMContext::MD_dereferenceable_or_null:
661 // These don't apply for stores.
662 break;
663 }
664 }
665
666 return NewStore;
667}
668
669/// Combine loads to match the type of their uses' value after looking
670/// through intervening bitcasts.
671///
672/// The core idea here is that if the result of a load is used in an operation,
673/// we should load the type most conducive to that operation. For example, when
674/// loading an integer and converting that immediately to a pointer, we should
675/// instead directly load a pointer.
676///
677/// However, this routine must never change the width of a load or the number of
678/// loads as that would introduce a semantic change. This combine is expected to
679/// be a semantic no-op which just allows loads to more closely model the types
680/// of their consuming operations.
681///
682/// Currently, we also refuse to change the precise type used for an atomic load
683/// or a volatile load. This is debatable, and might be reasonable to change
684/// later. However, it is risky in case some backend or other part of LLVM is
685/// relying on the exact type loaded to select appropriate atomic operations.
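///
/// For example (illustrative), when the only user is a no-op sized cast:
///   %i = load i32, ptr %p
///   %f = bitcast i32 %i to float
/// the load is rewritten as
///   %f = load float, ptr %p
/// whereas an inttoptr/ptrtoint user is left alone, since rewriting it would
/// amount to pointer/integer type punning.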
686static Instruction *combineLoadToOperationType(InstCombinerImpl &IC,
687 LoadInst &Load) {
688 // FIXME: We could probably with some care handle both volatile and ordered
689 // atomic loads here but it isn't clear that this is important.
690 if (!Load.isUnordered())
691 return nullptr;
692
693 if (Load.use_empty())
694 return nullptr;
695
696 // swifterror values can't be bitcasted.
697 if (Load.getPointerOperand()->isSwiftError())
698 return nullptr;
699
700 // Fold away bit casts of the loaded value by loading the desired type.
701 // Note that we should not do this for pointer<->integer casts,
702 // because that would result in type punning.
703 if (Load.hasOneUse()) {
704 // Don't transform when the type is x86_amx; it keeps the pass that
705 // lowers the x86_amx type happy.
706 Type *LoadTy = Load.getType();
707 if (auto *BC = dyn_cast<BitCastInst>(Load.user_back())) {
708 assert(!LoadTy->isX86_AMXTy() && "Load from x86_amx* should not happen!");
709 if (BC->getType()->isX86_AMXTy())
710 return nullptr;
711 }
712
713 if (auto *CastUser = dyn_cast<CastInst>(Load.user_back())) {
714 Type *DestTy = CastUser->getDestTy();
715 if (CastUser->isNoopCast(IC.getDataLayout()) &&
716 LoadTy->isPtrOrPtrVectorTy() == DestTy->isPtrOrPtrVectorTy() &&
717 (!Load.isAtomic() || isSupportedAtomicType(DestTy))) {
718 LoadInst *NewLoad = IC.combineLoadToNewType(Load, DestTy);
719 CastUser->replaceAllUsesWith(NewLoad);
720 IC.eraseInstFromFunction(*CastUser);
721 return &Load;
722 }
723 }
724 }
725
726 // FIXME: We should also canonicalize loads of vectors when their elements are
727 // cast to other types.
728 return nullptr;
729}
730
731static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
732 // FIXME: We could probably with some care handle both volatile and atomic
733 // stores here but it isn't clear that this is important.
734 if (!LI.isSimple())
735 return nullptr;
736
737 Type *T = LI.getType();
738 if (!T->isAggregateType())
739 return nullptr;
740
741 StringRef Name = LI.getName();
742
743 if (auto *ST = dyn_cast<StructType>(T)) {
744 // If the struct only has one element, we unpack.
745 auto NumElements = ST->getNumElements();
746 if (NumElements == 1) {
747 LoadInst *NewLoad = IC.combineLoadToNewType(LI, ST->getTypeAtIndex(0U),
748 ".unpack");
749 NewLoad->setAAMetadata(LI.getAAMetadata());
750 // Copy invariant metadata from parent load.
751 NewLoad->copyMetadata(LI, LLVMContext::MD_invariant_load);
752 return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
753 PoisonValue::get(T), NewLoad, 0, Name));
754 }
755
756 // We don't want to break loads with padding here as we'd lose
757 // the knowledge that padding exists for the rest of the pipeline.
758 const DataLayout &DL = IC.getDataLayout();
759 auto *SL = DL.getStructLayout(ST);
760
761 if (SL->hasPadding())
762 return nullptr;
763
764 const auto Align = LI.getAlign();
765 auto *Addr = LI.getPointerOperand();
766 auto *IdxType = DL.getIndexType(Addr->getType());
767
768 Value *V = PoisonValue::get(T);
769 for (unsigned i = 0; i < NumElements; i++) {
770 auto *Ptr = IC.Builder.CreateInBoundsPtrAdd(
771 Addr, IC.Builder.CreateTypeSize(IdxType, SL->getElementOffset(i)),
772 Name + ".elt");
773 auto *L = IC.Builder.CreateAlignedLoad(
774 ST->getElementType(i), Ptr,
775 commonAlignment(Align, SL->getElementOffset(i).getKnownMinValue()),
776 Name + ".unpack");
777 // Propagate AA metadata. It'll still be valid on the narrowed load.
778 L->setAAMetadata(LI.getAAMetadata());
779 // Copy invariant metadata from parent load.
780 L->copyMetadata(LI, LLVMContext::MD_invariant_load);
781 V = IC.Builder.CreateInsertValue(V, L, i);
782 }
783
784 V->setName(Name);
785 return IC.replaceInstUsesWith(LI, V);
786 }
787
788 if (auto *AT = dyn_cast<ArrayType>(T)) {
789 auto *ET = AT->getElementType();
790 auto NumElements = AT->getNumElements();
791 if (NumElements == 1) {
792 LoadInst *NewLoad = IC.combineLoadToNewType(LI, ET, ".unpack");
793 NewLoad->setAAMetadata(LI.getAAMetadata());
794 return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
795 PoisonValue::get(T), NewLoad, 0, Name));
796 }
797
798 // Bail out if the array is too large. Ideally we would like to optimize
799 // arrays of arbitrary size but this has a terrible impact on compile time.
800 // The threshold here is chosen arbitrarily, maybe needs a little bit of
801 // tuning.
802 if (NumElements > IC.MaxArraySizeForCombine)
803 return nullptr;
804
805 const DataLayout &DL = IC.getDataLayout();
806 TypeSize EltSize = DL.getTypeAllocSize(ET);
807 const auto Align = LI.getAlign();
808
809 auto *Addr = LI.getPointerOperand();
810 auto *IdxType = Type::getInt64Ty(T->getContext());
811 auto *Zero = ConstantInt::get(IdxType, 0);
812
813 Value *V = PoisonValue::get(T);
814 TypeSize Offset = TypeSize::getZero();
815 for (uint64_t i = 0; i < NumElements; i++) {
816 Value *Indices[2] = {
817 Zero,
818 ConstantInt::get(IdxType, i),
819 };
820 auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, ArrayRef(Indices),
821 Name + ".elt");
822 auto EltAlign = commonAlignment(Align, Offset.getKnownMinValue());
823 auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
824 EltAlign, Name + ".unpack");
825 L->setAAMetadata(LI.getAAMetadata());
826 V = IC.Builder.CreateInsertValue(V, L, i);
827 Offset += EltSize;
828 }
829
830 V->setName(Name);
831 return IC.replaceInstUsesWith(LI, V);
832 }
833
834 return nullptr;
835}
836
837// If we can determine that all possible objects pointed to by the provided
838// pointer value are, not only dereferenceable, but also definitively less than
839// or equal to the provided maximum size, then return true. Otherwise, return
840// false. Only constant global values and allocas can satisfy this check.
841//
842// FIXME: This should probably live in ValueTracking (or similar).
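// For example (illustrative): "@g = constant [2 x i32] ..." or "%a = alloca i64"
// satisfy the check for MaxSize >= 8, while a pointer returned by an unknown
// call or passed in as a function argument is conservatively rejected.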
843static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
844 const DataLayout &DL) {
845 SmallPtrSet<Value *, 4> Visited;
846 SmallVector<Value *, 4> Worklist(1, V);
847
848 do {
849 Value *P = Worklist.pop_back_val();
850 P = P->stripPointerCasts();
851
852 if (!Visited.insert(P).second)
853 continue;
854
855 if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
856 Worklist.push_back(SI->getTrueValue());
857 Worklist.push_back(SI->getFalseValue());
858 continue;
859 }
860
861 if (PHINode *PN = dyn_cast<PHINode>(P)) {
862 append_range(Worklist, PN->incoming_values());
863 continue;
864 }
865
866 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
867 if (GA->isInterposable())
868 return false;
869 Worklist.push_back(GA->getAliasee());
870 continue;
871 }
872
873 // If we know how big this object is, and it is less than MaxSize, continue
874 // searching. Otherwise, return false.
875 if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
876 std::optional<TypeSize> AllocSize = AI->getAllocationSize(DL);
877 if (!AllocSize || AllocSize->isScalable() ||
878 AllocSize->getFixedValue() > MaxSize)
879 return false;
880 continue;
881 }
882
883 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
884 if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
885 return false;
886
887 uint64_t InitSize = GV->getGlobalSize(DL);
888 if (InitSize > MaxSize)
889 return false;
890 continue;
891 }
892
893 return false;
894 } while (!Worklist.empty());
895
896 return true;
897}
898
899// If we're indexing into an object of a known size, and the outer index is
900// not a constant, but having any value but zero would lead to undefined
901// behavior, replace it with zero.
902//
903// For example, if we have:
904// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
905// ...
906// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
907// ... = load i32* %arrayidx, align 4
908// Then we know that we can replace %x in the GEP with i64 0.
909//
910// FIXME: We could fold any GEP index to zero that would cause UB if it were
911// not zero. Currently, we only handle the first such index. We could also
912// search through non-zero constant indices if we kept track of the
913// offsets those indices implied.
914static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC,
915 GetElementPtrInst *GEPI, Instruction *MemI,
916 unsigned &Idx) {
917 if (GEPI->getNumOperands() < 2)
918 return false;
919
920 // Find the first non-zero index of a GEP. If all indices are zero, return
921 // one past the last index.
922 auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
923 unsigned I = 1;
924 for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
925 Value *V = GEPI->getOperand(I);
926 if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
927 if (CI->isZero())
928 continue;
929
930 break;
931 }
932
933 return I;
934 };
935
936 // Skip through initial 'zero' indices, and find the corresponding pointer
937 // type. See if the next index is not a constant.
938 Idx = FirstNZIdx(GEPI);
939 if (Idx == GEPI->getNumOperands())
940 return false;
941 if (isa<Constant>(GEPI->getOperand(Idx)))
942 return false;
943
944 SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
945 Type *SourceElementType = GEPI->getSourceElementType();
946 // Size information about scalable vectors is not available, so we cannot
947 // deduce whether indexing at n is undefined behaviour or not. Bail out.
948 if (SourceElementType->isScalableTy())
949 return false;
950
951 Type *AllocTy = GetElementPtrInst::getIndexedType(SourceElementType, Ops);
952 if (!AllocTy || !AllocTy->isSized())
953 return false;
954 const DataLayout &DL = IC.getDataLayout();
955 uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedValue();
956
957 // If there are more indices after the one we might replace with a zero, make
958 // sure they're all non-negative. If any of them are negative, the overall
959 // address being computed might be before the base address determined by the
960 // first non-zero index.
961 auto IsAllNonNegative = [&]() {
962 for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
963 KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), MemI);
964 if (Known.isNonNegative())
965 continue;
966 return false;
967 }
968
969 return true;
970 };
971
972 // FIXME: If the GEP is not inbounds, and there are extra indices after the
973 // one we'll replace, those could cause the address computation to wrap
974 // (rendering the IsAllNonNegative() check below insufficient). We can do
975 // better, ignoring zero indices (and other indices we can prove small
976 // enough not to wrap).
977 if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
978 return false;
979
980 // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
981 // also known to be dereferenceable.
982 return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
983 IsAllNonNegative();
984}
985
986// If we're indexing into an object with a variable index for the memory
987// access, but the object has only one element, we can assume that the index
988// will always be zero. If we replace the GEP, return it.
989static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr,
990 Instruction &MemI) {
991 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
992 unsigned Idx;
993 if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
994 Instruction *NewGEPI = GEPI->clone();
995 NewGEPI->setOperand(Idx,
996 ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
997 IC.InsertNewInstBefore(NewGEPI, GEPI->getIterator());
998 return NewGEPI;
999 }
1000 }
1001
1002 return nullptr;
1003}
1004
1005static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
1006 if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
1007 return false;
1008
1009 auto *Ptr = SI.getPointerOperand();
1010 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
1011 Ptr = GEPI->getOperand(0);
1012 return (isa<ConstantPointerNull>(Ptr) &&
1013 !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
1014}
1015
1016static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
1017 if (auto *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
1018 const Value *GEPI0 = GEPI->getOperand(0);
1019 if (isa<ConstantPointerNull>(GEPI0) &&
1020 !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
1021 return true;
1022 }
1023 if (isa<UndefValue>(Op) ||
1024 (isa<ConstantPointerNull>(Op) &&
1025 !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
1026 return true;
1027 return false;
1028}
1029
1030Value *InstCombinerImpl::simplifyNonNullOperand(Value *V,
1031 bool HasDereferenceable,
1032 unsigned Depth) {
1033 if (auto *Sel = dyn_cast<SelectInst>(V)) {
1034 if (isa<ConstantPointerNull>(Sel->getOperand(1)))
1035 return Sel->getOperand(2);
1036
1037 if (isa<ConstantPointerNull>(Sel->getOperand(2)))
1038 return Sel->getOperand(1);
1039 }
1040
1041 if (!V->hasOneUse())
1042 return nullptr;
1043
1044 constexpr unsigned RecursionLimit = 3;
1045 if (Depth == RecursionLimit)
1046 return nullptr;
1047
1048 if (auto *GEP = dyn_cast<GetElementPtrInst>(V)) {
1049 if (HasDereferenceable || GEP->isInBounds()) {
1050 if (auto *Res = simplifyNonNullOperand(GEP->getPointerOperand(),
1051 HasDereferenceable, Depth + 1)) {
1052 replaceOperand(*GEP, 0, Res);
1053 addToWorklist(GEP);
1054 return nullptr;
1055 }
1056 }
1057 }
1058
1059 if (auto *PHI = dyn_cast<PHINode>(V)) {
1060 bool Changed = false;
1061 for (Use &U : PHI->incoming_values()) {
1062 // We set Depth to RecursionLimit to avoid expensive recursion.
1063 if (auto *Res = simplifyNonNullOperand(U.get(), HasDereferenceable,
1064 RecursionLimit)) {
1065 replaceUse(U, Res);
1066 Changed = true;
1067 }
1068 }
1069 if (Changed)
1070 addToWorklist(PHI);
1071 return nullptr;
1072 }
1073
1074 return nullptr;
1075}
1076
1077Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) {
1078 Value *Op = LI.getOperand(0);
1079 if (Value *Res = simplifyLoadInst(&LI, Op, SQ.getWithInstruction(&LI)))
1080 return replaceInstUsesWith(LI, Res);
1081
1082 // Try to canonicalize the loaded type.
1083 if (Instruction *Res = combineLoadToOperationType(*this, LI))
1084 return Res;
1085
1086 // Replace GEP indices if possible.
1087 if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI))
1088 return replaceOperand(LI, 0, NewGEPI);
1089
1090 if (Instruction *Res = unpackLoadToAggregate(*this, LI))
1091 return Res;
1092
1093 // Do really simple store-to-load forwarding and load CSE, to catch cases
1094 // where there are several consecutive memory accesses to the same location,
1095 // separated by a few arithmetic operations.
1096 bool IsLoadCSE = false;
1097 BatchAAResults BatchAA(*AA);
1098 if (Value *AvailableVal = FindAvailableLoadedValue(&LI, BatchAA, &IsLoadCSE)) {
1099 if (IsLoadCSE)
1100 combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);
1101
1102 return replaceInstUsesWith(
1103 LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
1104 LI.getName() + ".cast"));
1105 }
1106
1107 // None of the following transforms are legal for volatile/ordered atomic
1108 // loads. Most of them do apply for unordered atomics.
1109 if (!LI.isUnordered()) return nullptr;
1110
1111 // load(gep null, ...) -> unreachable
1112 // load null/undef -> unreachable
1113 // TODO: Consider a target hook for valid address spaces for this xforms.
1114 if (canSimplifyNullLoadOrGEP(LI, Op)) {
1115 CreateNonTerminatorUnreachable(&LI);
1116 return replaceInstUsesWith(LI, PoisonValue::get(LI.getType()));
1117 }
1118
1119 if (Op->hasOneUse()) {
1120 // Change select and PHI nodes to select values instead of addresses: this
1121 // helps alias analysis out a lot, allows many other simplifications, and
1122 // exposes redundancy in the code.
1123 //
1124 // Note that we cannot do the transformation unless we know that the
1125 // introduced loads cannot trap! Something like this is valid as long as
1126 // the condition is always false: load (select bool %C, int* null, int* %G),
1127 // but it would not be valid if we transformed it to load from null
1128 // unconditionally.
1129 //
1130
1131 auto *ASC = dyn_cast<AddrSpaceCastInst>(Op);
1132 Value *SelectOp = Op;
1133 if (ASC && ASC->getOperand(0)->hasOneUse())
1134 SelectOp = ASC->getOperand(0);
1135 if (SelectInst *SI = dyn_cast<SelectInst>(SelectOp)) {
1136 // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
1137 // or
1138 // load (addrspacecast(select (Cond, &V1, &V2))) -->
1139 // select(Cond, load (addrspacecast(&V1)), load (addrspacecast(&V2))).
1140 Align Alignment = LI.getAlign();
1141 if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
1142 Alignment, DL, SI) &&
1143 isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
1144 Alignment, DL, SI)) {
1145
1146 auto MaybeCastedLoadOperand = [&](Value *Op) {
1147 if (ASC)
1148 return Builder.CreateAddrSpaceCast(Op, ASC->getType(),
1149 Op->getName() + ".cast");
1150 return Op;
1151 };
1152 Value *LoadOp1 = MaybeCastedLoadOperand(SI->getOperand(1));
1153 LoadInst *V1 = Builder.CreateLoad(LI.getType(), LoadOp1,
1154 LoadOp1->getName() + ".val");
1155
1156 Value *LoadOp2 = MaybeCastedLoadOperand(SI->getOperand(2));
1157 LoadInst *V2 = Builder.CreateLoad(LI.getType(), LoadOp2,
1158 LoadOp2->getName() + ".val");
1159 assert(LI.isUnordered() && "implied by above");
1160 V1->setAlignment(Alignment);
1161 V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
1162 V2->setAlignment(Alignment);
1163 V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
1164 // It is safe to copy any metadata that does not trigger UB. Copy any
1165 // poison-generating metadata.
1166 V1->copyMetadata(LI, Metadata::PoisonGeneratingIDs);
1167 V2->copyMetadata(LI, Metadata::PoisonGeneratingIDs);
1168 return SelectInst::Create(SI->getCondition(), V1, V2, "", nullptr,
1169 ProfcheckDisableMetadataFixes ? nullptr : SI);
1170 }
1171 }
1172 }
1173
1174 if (!NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace()))
1175 if (Value *V = simplifyNonNullOperand(Op, /*HasDereferenceable=*/true))
1176 return replaceOperand(LI, 0, V);
1177
1178 return nullptr;
1179}
1180
1181/// Look for extractelement/insertvalue sequence that acts like a bitcast.
1182///
1183/// \returns underlying value that was "cast", or nullptr otherwise.
1184///
1185/// For example, if we have:
1186///
1187/// %E0 = extractelement <2 x double> %U, i32 0
1188/// %V0 = insertvalue [2 x double] undef, double %E0, 0
1189/// %E1 = extractelement <2 x double> %U, i32 1
1190/// %V1 = insertvalue [2 x double] %V0, double %E1, 1
1191///
1192/// and the layout of a <2 x double> is isomorphic to a [2 x double],
1193/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
1194/// Note that %U may contain non-undef values where %V1 has undef.
1195static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) {
1196 Value *U = nullptr;
1197 while (auto *IV = dyn_cast<InsertValueInst>(V)) {
1198 auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
1199 if (!E)
1200 return nullptr;
1201 auto *W = E->getVectorOperand();
1202 if (!U)
1203 U = W;
1204 else if (U != W)
1205 return nullptr;
1206 auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
1207 if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
1208 return nullptr;
1209 V = IV->getAggregateOperand();
1210 }
1211 if (!match(V, m_Undef()) || !U)
1212 return nullptr;
1213
1214 auto *UT = cast<VectorType>(U->getType());
1215 auto *VT = V->getType();
1216 // Check that types UT and VT are bitwise isomorphic.
1217 const auto &DL = IC.getDataLayout();
1218 if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
1219 return nullptr;
1220 }
1221 if (auto *AT = dyn_cast<ArrayType>(VT)) {
1222 if (AT->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
1223 return nullptr;
1224 } else {
1225 auto *ST = cast<StructType>(VT);
1226 if (ST->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
1227 return nullptr;
1228 for (const auto *EltT : ST->elements()) {
1229 if (EltT != UT->getElementType())
1230 return nullptr;
1231 }
1232 }
1233 return U;
1234}
1235
1236/// Combine stores to match the type of value being stored.
1237///
1238/// The core idea here is that the memory does not have any intrinsic type and
1239/// where we can, we should match the type of a store to the type of value being
1240/// stored.
1241///
1242/// However, this routine must never change the width of a store or the number of
1243/// stores as that would introduce a semantic change. This combine is expected to
1244/// be a semantic no-op which just allows stores to more closely model the types
1245/// of their incoming values.
1246///
1247/// Currently, we also refuse to change the precise type used for an atomic or
1248/// volatile store. This is debatable, and might be reasonable to change later.
1249/// However, it is risky in case some backend or other part of LLVM is relying
1250/// on the exact type stored to select appropriate atomic operations.
1251///
1252/// \returns true if the store was successfully combined away. This indicates
1253/// the caller must erase the store instruction. We have to let the caller erase
1254/// the store instruction as otherwise there is no way to signal whether it was
1255/// combined or not: IC.EraseInstFromFunction returns a null pointer.
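///
/// For example (illustrative):
///   %f = bitcast i32 %i to float
///   store float %f, ptr %p
/// becomes
///   store i32 %i, ptr %p
/// keeping the store width identical while matching the incoming value's type.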
1256static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) {
1257 // FIXME: We could probably with some care handle both volatile and ordered
1258 // atomic stores here but it isn't clear that this is important.
1259 if (!SI.isUnordered())
1260 return false;
1261
1262 // swifterror values can't be bitcasted.
1263 if (SI.getPointerOperand()->isSwiftError())
1264 return false;
1265
1266 Value *V = SI.getValueOperand();
1267
1268 // Fold away bit casts of the stored value by storing the original type.
1269 if (auto *BC = dyn_cast<BitCastInst>(V)) {
1270 assert(!BC->getType()->isX86_AMXTy() &&
1271 "store to x86_amx* should not happen!");
1272 V = BC->getOperand(0);
1273 // Don't transform when the type is x86_amx; it keeps the pass that
1274 // lowers the x86_amx type happy.
1275 if (V->getType()->isX86_AMXTy())
1276 return false;
1277 if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
1278 combineStoreToNewValue(IC, SI, V);
1279 return true;
1280 }
1281 }
1282
1283 if (Value *U = likeBitCastFromVector(IC, V))
1284 if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
1285 combineStoreToNewValue(IC, SI, U);
1286 return true;
1287 }
1288
1289 // FIXME: We should also canonicalize stores of vectors when their elements
1290 // are cast to other types.
1291 return false;
1292}
1293
1294static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
1295 // FIXME: We could probably with some care handle both volatile and atomic
1296 // stores here but it isn't clear that this is important.
1297 if (!SI.isSimple())
1298 return false;
1299
1300 Value *V = SI.getValueOperand();
1301 Type *T = V->getType();
1302
1303 if (!T->isAggregateType())
1304 return false;
1305
1306 if (auto *ST = dyn_cast<StructType>(T)) {
1307 // If the struct only has one element, we unpack.
1308 unsigned Count = ST->getNumElements();
1309 if (Count == 1) {
1310 V = IC.Builder.CreateExtractValue(V, 0);
1311 combineStoreToNewValue(IC, SI, V);
1312 return true;
1313 }
1314
1315 // We don't want to break loads with padding here as we'd lose
1316 // the knowledge that padding exists for the rest of the pipeline.
1317 const DataLayout &DL = IC.getDataLayout();
1318 auto *SL = DL.getStructLayout(ST);
1319
1320 if (SL->hasPadding())
1321 return false;
1322
1323 const auto Align = SI.getAlign();
1324
1325 SmallString<16> EltName = V->getName();
1326 EltName += ".elt";
1327 auto *Addr = SI.getPointerOperand();
1328 SmallString<16> AddrName = Addr->getName();
1329 AddrName += ".repack";
1330
1331 auto *IdxType = DL.getIndexType(Addr->getType());
1332 for (unsigned i = 0; i < Count; i++) {
1333 auto *Ptr = IC.Builder.CreateInBoundsPtrAdd(
1334 Addr, IC.Builder.CreateTypeSize(IdxType, SL->getElementOffset(i)),
1335 AddrName);
1336 auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
1337 auto EltAlign =
1338 commonAlignment(Align, SL->getElementOffset(i).getKnownMinValue());
1339 llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
1340 NS->setAAMetadata(SI.getAAMetadata());
1341 }
1342
1343 return true;
1344 }
1345
1346 if (auto *AT = dyn_cast<ArrayType>(T)) {
1347 // If the array only has one element, we unpack.
1348 auto NumElements = AT->getNumElements();
1349 if (NumElements == 1) {
1350 V = IC.Builder.CreateExtractValue(V, 0);
1351 combineStoreToNewValue(IC, SI, V);
1352 return true;
1353 }
1354
1355 // Bail out if the array is too large. Ideally we would like to optimize
1356 // arrays of arbitrary size but this has a terrible impact on compile time.
1357 // The threshold here is chosen arbitrarily, maybe needs a little bit of
1358 // tuning.
1359 if (NumElements > IC.MaxArraySizeForCombine)
1360 return false;
1361
1362 const DataLayout &DL = IC.getDataLayout();
1363 TypeSize EltSize = DL.getTypeAllocSize(AT->getElementType());
1364 const auto Align = SI.getAlign();
1365
1366 SmallString<16> EltName = V->getName();
1367 EltName += ".elt";
1368 auto *Addr = SI.getPointerOperand();
1369 SmallString<16> AddrName = Addr->getName();
1370 AddrName += ".repack";
1371
1372 auto *IdxType = Type::getInt64Ty(T->getContext());
1373 auto *Zero = ConstantInt::get(IdxType, 0);
1374
1375 TypeSize Offset = TypeSize::getZero();
1376 for (uint64_t i = 0; i < NumElements; i++) {
1377 Value *Indices[2] = {
1378 Zero,
1379 ConstantInt::get(IdxType, i),
1380 };
1381 auto *Ptr =
1382 IC.Builder.CreateInBoundsGEP(AT, Addr, ArrayRef(Indices), AddrName);
1383 auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
1384 auto EltAlign = commonAlignment(Align, Offset.getKnownMinValue());
1385 Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
1386 NS->setAAMetadata(SI.getAAMetadata());
1387 Offset += EltSize;
1388 }
1389
1390 return true;
1391 }
1392
1393 return false;
1394}
1395
1396/// equivalentAddressValues - Test if A and B will obviously have the same
1397/// value. This includes recognizing that %t0 and %t1 will have the same
1398/// value in code like this:
1399/// %t0 = getelementptr \@a, 0, 3
1400/// store i32 0, i32* %t0
1401/// %t1 = getelementptr \@a, 0, 3
1402/// %t2 = load i32* %t1
1403///
1404static bool equivalentAddressValues(Value *A, Value *B) {
1405 // Test if the values are trivially equivalent.
1406 if (A == B) return true;
1407
1408 // Test if the values come from identical arithmetic instructions.
1409 // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
1410 // it's only used to compare two uses within the same basic block, which
1411 // means that they'll always either have the same value or one of them
1412 // will have an undefined value.
1413 if (isa<BinaryOperator>(A) ||
1414 isa<CastInst>(A) ||
1415 isa<PHINode>(A) ||
1416 isa<GetElementPtrInst>(A))
1417 if (Instruction *BI = dyn_cast<Instruction>(B))
1418 if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
1419 return true;
1420
1421 // Otherwise they may not be equivalent.
1422 return false;
1423}
1424
1425Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {
1426 Value *Val = SI.getOperand(0);
1427 Value *Ptr = SI.getOperand(1);
1428
1429 // Try to canonicalize the stored type.
1430 if (combineStoreToValueType(*this, SI))
1431 return eraseInstFromFunction(SI);
1432
1433 // Try to canonicalize the stored type.
1434 if (unpackStoreToAggregate(*this, SI))
1435 return eraseInstFromFunction(SI);
1436
1437 // Replace GEP indices if possible.
1438 if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI))
1439 return replaceOperand(SI, 1, NewGEPI);
1440
1441 // Don't hack volatile/ordered stores.
1442 // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
1443 if (!SI.isUnordered()) return nullptr;
1444
1445 // If the RHS is an alloca with a single use, zapify the store, making the
1446 // alloca dead.
1447 if (Ptr->hasOneUse()) {
1448 if (isa<AllocaInst>(Ptr))
1449 return eraseInstFromFunction(SI);
1450 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1451 if (isa<AllocaInst>(GEP->getOperand(0))) {
1452 if (GEP->getOperand(0)->hasOneUse())
1453 return eraseInstFromFunction(SI);
1454 }
1455 }
1456 }
1457
1458 // If we have a store to a location which is known constant, we can conclude
1459 // that the store must be storing the constant value (else the memory
1460 // wouldn't be constant), and this must be a noop.
1461 if (!isModSet(AA->getModRefInfoMask(Ptr)))
1462 return eraseInstFromFunction(SI);
1463
1464 // Do really simple DSE, to catch cases where there are several consecutive
1465 // stores to the same location, separated by a few arithmetic operations. This
1466 // situation often occurs with bitfield accesses.
1467 BasicBlock::iterator BBI = SI.getIterator();
1468 for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
1469 --ScanInsts) {
1470 --BBI;
1471 // Don't count debug info directives, lest they affect codegen,
1472 // and we skip pointer-to-pointer bitcasts, which are NOPs.
1473 if (BBI->isDebugOrPseudoInst()) {
1474 ScanInsts++;
1475 continue;
1476 }
1477
1478 if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
1479 // Prev store isn't volatile, and stores to the same location?
1480 if (PrevSI->isUnordered() &&
1481 equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1)) &&
1482 PrevSI->getValueOperand()->getType() ==
1483 SI.getValueOperand()->getType()) {
1484 ++NumDeadStore;
1485 // Manually add back the original store to the worklist now, so it will
1486 // be processed after the operands of the removed store, as this may
1487 // expose additional DSE opportunities.
1488 Worklist.push(&SI);
1489 eraseInstFromFunction(*PrevSI);
1490 return nullptr;
1491 }
1492 break;
1493 }
1494
1495 // If this is a load, we have to stop. However, if the load is from the
1496 // pointer we're storing to and it produces the value we're storing, then
1497 // *this* store is dead (X = load P; store X -> P).
1498 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
1499 if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
1500 assert(SI.isUnordered() && "can't eliminate ordering operation");
1501 return eraseInstFromFunction(SI);
1502 }
1503
1504 // Otherwise, this is a load from some other location. Stores before it
1505 // may not be dead.
1506 break;
1507 }
1508
1509 // Don't skip over loads, throws or things that can modify memory.
1510 if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
1511 break;
1512 }
1513
1514 // store X, null -> turns into 'unreachable' in SimplifyCFG
1515 // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
1516 if (canSimplifyNullStoreOrGEP(SI)) {
1517 if (!isa<PoisonValue>(Val))
1518 return replaceOperand(SI, 0, PoisonValue::get(Val->getType()));
1519 return nullptr; // Do not modify these!
1520 }
1521
1522 // This is a non-terminator unreachable marker. Don't remove it.
1523 if (isa<UndefValue>(Ptr)) {
1524 // Remove guaranteed-to-transfer instructions before the marker.
1525 if (removeInstructionsBeforeUnreachable(SI))
1526 return &SI;
1527 // Remove all instructions after the marker and handle dead blocks this
1528 // implies.
1529 SmallVector<BasicBlock *> Worklist;
1530 handleUnreachableFrom(SI.getNextNode(), Worklist);
1531 handlePotentiallyDeadBlocks(Worklist);
1532 return nullptr;
1533 }
1534
1535 // store undef, Ptr -> noop
1536 // FIXME: This is technically incorrect because it might overwrite a poison
1537 // value. Change to PoisonValue once #52930 is resolved.
1538 if (isa<UndefValue>(Val))
1539 return eraseInstFromFunction(SI);
1540
1541 if (!NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
1542 if (Value *V = simplifyNonNullOperand(Ptr, /*HasDereferenceable=*/true))
1543 return replaceOperand(SI, 1, V);
1544
1545 return nullptr;
1546}
1547
1548/// Try to transform:
1549/// if () { *P = v1; } else { *P = v2 }
1550/// or:
1551/// *P = v1; if () { *P = v2; }
1552/// into a phi node with a store in the successor.
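/// For example (illustrative), the two stores in
///   if.then:  store i32 1, ptr %p    if.else:  store i32 2, ptr %p
/// are replaced in the common successor by
///   %storemerge = phi i32 [ 1, %if.then ], [ 2, %if.else ]
///   store i32 %storemerge, ptr %p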
1553bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {
1554 if (!SI.isUnordered())
1555 return false; // This code has not been audited for volatile/ordered case.
1556
1557 // Check if the successor block has exactly 2 incoming edges.
1558 BasicBlock *StoreBB = SI.getParent();
1559 BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
1560 if (!DestBB->hasNPredecessors(2))
1561 return false;
1562
1563 // Capture the other block (the block that doesn't contain our store).
1564 pred_iterator PredIter = pred_begin(DestBB);
1565 if (*PredIter == StoreBB)
1566 ++PredIter;
1567 BasicBlock *OtherBB = *PredIter;
1568
1569 // Bail out if all of the relevant blocks aren't distinct. This can happen,
1570 // for example, if SI is in an infinite loop.
1571 if (StoreBB == DestBB || OtherBB == DestBB)
1572 return false;
1573
1574 // Verify that the other block ends in a branch and is not otherwise empty.
1575 BasicBlock::iterator BBI(OtherBB->getTerminator());
1576 BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
1577 if (!OtherBr || BBI == OtherBB->begin())
1578 return false;
1579
1580 auto OtherStoreIsMergeable = [&](StoreInst *OtherStore) -> bool {
1581 if (!OtherStore ||
1582 OtherStore->getPointerOperand() != SI.getPointerOperand())
1583 return false;
1584
1585 auto *SIVTy = SI.getValueOperand()->getType();
1586 auto *OSVTy = OtherStore->getValueOperand()->getType();
1587 return CastInst::isBitOrNoopPointerCastable(OSVTy, SIVTy, DL) &&
1588 SI.hasSameSpecialState(OtherStore);
1589 };
1590
1591 // If the other block ends in an unconditional branch, check for the 'if then
1592 // else' case. There is an instruction before the branch.
1593 StoreInst *OtherStore = nullptr;
1594 if (OtherBr->isUnconditional()) {
1595 --BBI;
1596 // Skip over debugging info and pseudo probes.
1597 while (BBI->isDebugOrPseudoInst()) {
1598 if (BBI==OtherBB->begin())
1599 return false;
1600 --BBI;
1601 }
1602 // If this isn't a store, isn't a store to the same location, or is not the
1603 // right kind of store, bail out.
1604 OtherStore = dyn_cast<StoreInst>(BBI);
1605 if (!OtherStoreIsMergeable(OtherStore))
1606 return false;
1607 } else {
1608 // Otherwise, the other block ended with a conditional branch. If one of the
1609 // destinations is StoreBB, then we have the if/then case.
1610 if (OtherBr->getSuccessor(0) != StoreBB &&
1611 OtherBr->getSuccessor(1) != StoreBB)
1612 return false;
1613
1614 // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
1615 // if/then triangle. See if there is a store to the same ptr as SI that
1616 // lives in OtherBB.
1617 for (;; --BBI) {
1618 // Check to see if we find the matching store.
1619 OtherStore = dyn_cast<StoreInst>(BBI);
1620 if (OtherStoreIsMergeable(OtherStore))
1621 break;
1622
1623 // If we find something that may be using or overwriting the stored
1624 // value, or if we run out of instructions, we can't do the transform.
1625 if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
1626 BBI->mayWriteToMemory() || BBI == OtherBB->begin())
1627 return false;
1628 }
1629
1630 // In order to eliminate the store in OtherBr, we have to make sure nothing
1631 // reads or overwrites the stored value in StoreBB.
1632 for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
1633 // FIXME: This should really be AA driven.
1634 if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
1635 return false;
1636 }
1637 }
1638
1639 // Insert a PHI node now if we need it.
1640 Value *MergedVal = OtherStore->getValueOperand();
1641 // The debug locations of the original instructions might differ. Merge them.
1642 DebugLoc MergedLoc =
1643 DebugLoc::getMergedLocation(SI.getDebugLoc(), OtherStore->getDebugLoc());
1644 if (MergedVal != SI.getValueOperand()) {
1645 PHINode *PN =
1646 PHINode::Create(SI.getValueOperand()->getType(), 2, "storemerge");
1647 PN->addIncoming(SI.getValueOperand(), SI.getParent());
1648 Builder.SetInsertPoint(OtherStore);
1649 PN->addIncoming(Builder.CreateBitOrPointerCast(MergedVal, PN->getType()),
1650 OtherBB);
1651 MergedVal = InsertNewInstBefore(PN, DestBB->begin());
1652 PN->setDebugLoc(MergedLoc);
1653 }
1654
1655 // Advance to a place where it is safe to insert the new store and insert it.
1656 BBI = DestBB->getFirstInsertionPt();
1657 StoreInst *NewSI =
1658 new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),
1659 SI.getOrdering(), SI.getSyncScopeID());
1660 InsertNewInstBefore(NewSI, BBI);
1661 NewSI->setDebugLoc(MergedLoc);
1662 NewSI->mergeDIAssignID({&SI, OtherStore});
1663
1664 // If the two stores had AA tags, merge them.
1665 AAMDNodes AATags = SI.getAAMetadata();
1666 if (AATags)
1667 NewSI->setAAMetadata(AATags.merge(OtherStore->getAAMetadata()));
1668
1669 // Nuke the old stores.
1670 eraseInstFromFunction(SI);
1671 eraseInstFromFunction(*OtherStore);
1672 return true;
1673}
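For reference, here is a hedged sketch of the input shape mergeStoreIntoSuccessor handles. The hypothetical helper buildDiamondStores uses IRBuilder to construct if (c) { *p = 1; } else { *p = 2; } as two stores in a diamond; running InstCombine on such a function is expected to leave a single store in the join block fed by a 'storemerge' PHI.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

// Hypothetical helper (sketch): builds the two-store diamond that
// mergeStoreIntoSuccessor rewrites into a PHI plus one store.
static llvm::Function *buildDiamondStores(llvm::Module &M) {
  llvm::LLVMContext &Ctx = M.getContext();
  llvm::IRBuilder<> B(Ctx);
  auto *FnTy = llvm::FunctionType::get(
      B.getVoidTy(), {B.getInt1Ty(), B.getPtrTy()}, /*isVarArg=*/false);
  auto *F = llvm::Function::Create(FnTy, llvm::Function::ExternalLinkage,
                                   "diamond", M);
  auto *Entry = llvm::BasicBlock::Create(Ctx, "entry", F);
  auto *Then = llvm::BasicBlock::Create(Ctx, "then", F);
  auto *Else = llvm::BasicBlock::Create(Ctx, "else", F);
  auto *Join = llvm::BasicBlock::Create(Ctx, "join", F);

  B.SetInsertPoint(Entry);
  B.CreateCondBr(F->getArg(0), Then, Else);   // if (c)
  B.SetInsertPoint(Then);
  B.CreateStore(B.getInt32(1), F->getArg(1)); //   *p = 1;
  B.CreateBr(Join);
  B.SetInsertPoint(Else);
  B.CreateStore(B.getInt32(2), F->getArg(1)); // else *p = 2;
  B.CreateBr(Join);
  B.SetInsertPoint(Join);
  B.CreateRetVoid();
  return F;
}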