LLVM 23.0.0git
InstructionCombining.cpp
1//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// InstructionCombining - Combine instructions to form fewer, simple
10// instructions. This pass does not modify the CFG. This pass is where
11// algebraic simplification happens.
12//
13// This pass combines things like:
14// %Y = add i32 %X, 1
15// %Z = add i32 %Y, 1
16// into:
17// %Z = add i32 %X, 2
18//
19// This is a simple worklist driven algorithm.
20//
21// This pass guarantees that the following canonicalizations are performed on
22// the program:
23// 1. If a binary operator has a constant operand, it is moved to the RHS
24// 2. Bitwise operators with constant operands are always grouped so that
25// shifts are performed first, then or's, then and's, then xor's.
26// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
27// 4. All cmp instructions on boolean values are replaced with logical ops
28// 5. add X, X is represented as (X*2) => (X << 1)
29// 6. Multiplies with a power-of-two constant argument are transformed into
30// shifts.
31// ... etc.
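//
// For example, canonicalizations 5 and 6 above rewrite (illustrative IR):
//    %a = add i32 %X, %X      ; rule 5: X+X is treated as X*2
//    %b = mul i32 %Y, 8       ; rule 6: multiply by a power of two
// into:
//    %a = shl i32 %X, 1
//    %b = shl i32 %Y, 3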
32//
33//===----------------------------------------------------------------------===//
34
35#include "InstCombineInternal.h"
36#include "llvm/ADT/APFloat.h"
37#include "llvm/ADT/APInt.h"
38#include "llvm/ADT/ArrayRef.h"
39#include "llvm/ADT/DenseMap.h"
42#include "llvm/ADT/Statistic.h"
47#include "llvm/Analysis/CFG.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constant.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DIBuilder.h"
67#include "llvm/IR/DataLayout.h"
68#include "llvm/IR/DebugInfo.h"
70#include "llvm/IR/Dominators.h"
72#include "llvm/IR/Function.h"
74#include "llvm/IR/IRBuilder.h"
75#include "llvm/IR/InstrTypes.h"
76#include "llvm/IR/Instruction.h"
79#include "llvm/IR/Intrinsics.h"
80#include "llvm/IR/Metadata.h"
81#include "llvm/IR/Operator.h"
82#include "llvm/IR/PassManager.h"
84#include "llvm/IR/Type.h"
85#include "llvm/IR/Use.h"
86#include "llvm/IR/User.h"
87#include "llvm/IR/Value.h"
88#include "llvm/IR/ValueHandle.h"
93#include "llvm/Support/Debug.h"
102#include <algorithm>
103#include <cassert>
104#include <cstdint>
105#include <memory>
106#include <optional>
107#include <string>
108#include <utility>
109
110#define DEBUG_TYPE "instcombine"
111#include "llvm/Transforms/Utils/InstructionWorklist.h"
112#include <optional>
113
114using namespace llvm;
115using namespace llvm::PatternMatch;
116
117STATISTIC(NumWorklistIterations,
118 "Number of instruction combining iterations performed");
119STATISTIC(NumOneIteration, "Number of functions with one iteration");
120STATISTIC(NumTwoIterations, "Number of functions with two iterations");
121STATISTIC(NumThreeIterations, "Number of functions with three iterations");
122STATISTIC(NumFourOrMoreIterations,
123 "Number of functions with four or more iterations");
124
125STATISTIC(NumCombined , "Number of insts combined");
126STATISTIC(NumConstProp, "Number of constant folds");
127STATISTIC(NumDeadInst , "Number of dead inst eliminated");
128STATISTIC(NumSunkInst , "Number of instructions sunk");
129STATISTIC(NumExpand, "Number of expansions");
130STATISTIC(NumFactor , "Number of factorizations");
131STATISTIC(NumReassoc , "Number of reassociations");
132DEBUG_COUNTER(VisitCounter, "instcombine-visit",
133 "Controls which instructions are visited");
134
135static cl::opt<bool> EnableCodeSinking("instcombine-code-sinking",
136 cl::desc("Enable code sinking"),
137 cl::init(true));
138
140 "instcombine-max-sink-users", cl::init(32),
141 cl::desc("Maximum number of undroppable users for instruction sinking"));
142
143static cl::opt<unsigned>
144MaxArraySize("instcombine-maxarray-size", cl::init(1024),
145 cl::desc("Maximum array size considered when doing a combine"));
146
147namespace llvm {
149} // end namespace llvm
150
151// FIXME: Remove this flag when it is no longer necessary to convert
152// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
153// increases variable availability at the cost of accuracy. Variables that
154// cannot be promoted by mem2reg or SROA will be described as living in memory
155// for their entire lifetime. However, passes like DSE and instcombine can
156// delete stores to the alloca, leading to misleading and inaccurate debug
157// information. This flag can be removed when those passes are fixed.
158static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
159 cl::Hidden, cl::init(true));
160
161std::optional<Instruction *>
162InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {
163 // Handle target specific intrinsics
164 if (II.getCalledFunction()->isTargetIntrinsic()) {
165 return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
166 }
167 return std::nullopt;
168}
169
170std::optional<Value *> InstCombiner::targetSimplifyDemandedUseBitsIntrinsic(
171 IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
172 bool &KnownBitsComputed) {
173 // Handle target specific intrinsics
174 if (II.getCalledFunction()->isTargetIntrinsic()) {
175 return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
176 *this, II, DemandedMask, Known, KnownBitsComputed);
177 }
178 return std::nullopt;
179}
180
181std::optional<Value *> InstCombiner::targetSimplifyDemandedVectorEltsIntrinsic(
182 IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
183 APInt &PoisonElts2, APInt &PoisonElts3,
184 std::function<void(Instruction *, unsigned, APInt, APInt &)>
185 SimplifyAndSetOp) {
186 // Handle target specific intrinsics
187 if (II.getCalledFunction()->isTargetIntrinsic()) {
188 return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
189 *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
190 SimplifyAndSetOp);
191 }
192 return std::nullopt;
193}
194
195bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
196 // Approved exception for TTI use: This queries a legality property of the
197 // target, not a profitability heuristic. Ideally this should be part of
198 // DataLayout instead.
199 return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
200}
201
202Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
203 if (!RewriteGEP)
204 return llvm::emitGEPOffset(&Builder, DL, GEP);
205
206 IRBuilderBase::InsertPointGuard Guard(Builder);
207 auto *Inst = dyn_cast<Instruction>(GEP);
208 if (Inst)
209 Builder.SetInsertPoint(Inst);
210
211 Value *Offset = EmitGEPOffset(GEP);
212 // Rewrite non-trivial GEPs to avoid duplicating the offset arithmetic.
213 if (Inst && !GEP->hasAllConstantIndices() &&
214 !GEP->getSourceElementType()->isIntegerTy(8)) {
215 replaceInstUsesWith(
216 *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
217 Offset, "", GEP->getNoWrapFlags()));
218 eraseInstFromFunction(*Inst);
219 }
220 return Offset;
221}
222
223Value *InstCombinerImpl::EmitGEPOffsets(ArrayRef<GEPOperator *> GEPs,
224 GEPNoWrapFlags NW, Type *IdxTy,
225 bool RewriteGEPs) {
226 auto Add = [&](Value *Sum, Value *Offset) -> Value * {
227 if (Sum)
228 return Builder.CreateAdd(Sum, Offset, "", NW.hasNoUnsignedWrap(),
229 NW.isInBounds());
230 else
231 return Offset;
232 };
233
234 Value *Sum = nullptr;
235 Value *OneUseSum = nullptr;
236 Value *OneUseBase = nullptr;
237 GEPNoWrapFlags OneUseFlags = GEPNoWrapFlags::all();
238 for (GEPOperator *GEP : reverse(GEPs)) {
239 Value *Offset;
240 {
241 // Expand the offset at the point of the previous GEP to enable rewriting.
242 // However, use the original insertion point for calculating Sum.
243 IRBuilderBase::InsertPointGuard Guard(Builder);
244 auto *Inst = dyn_cast<Instruction>(GEP);
245 if (RewriteGEPs && Inst)
246 Builder.SetInsertPoint(Inst);
247
248 Offset = EmitGEPOffset(GEP);
249 if (Offset->getType() != IdxTy)
250 Offset = Builder.CreateVectorSplat(
251 cast<VectorType>(IdxTy)->getElementCount(), Offset);
252 if (GEP->hasOneUse()) {
253 // Offsets of one-use GEPs will be merged into the next multi-use GEP.
254 OneUseSum = Add(OneUseSum, Offset);
255 OneUseFlags = OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags());
256 if (!OneUseBase)
257 OneUseBase = GEP->getPointerOperand();
258 continue;
259 }
260
261 if (OneUseSum)
262 Offset = Add(OneUseSum, Offset);
263
264 // Rewrite the GEP to reuse the computed offset. This also includes
265 // offsets from preceding one-use GEPs.
266 if (RewriteGEPs && Inst &&
267 !(GEP->getSourceElementType()->isIntegerTy(8) &&
268 GEP->getOperand(1) == Offset)) {
269 replaceInstUsesWith(
270 *Inst,
271 Builder.CreatePtrAdd(
272 OneUseBase ? OneUseBase : GEP->getPointerOperand(), Offset, "",
273 OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags())));
274 eraseInstFromFunction(*Inst);
275 }
276 }
277
278 Sum = Add(Sum, Offset);
279 OneUseSum = OneUseBase = nullptr;
280 OneUseFlags = GEPNoWrapFlags::all();
281 }
282 if (OneUseSum)
283 Sum = Add(Sum, OneUseSum);
284 if (!Sum)
285 return Constant::getNullValue(IdxTy);
286 return Sum;
287}
288
289/// Legal integers and common types are considered desirable. This is used to
290/// avoid creating instructions with types that may not be supported well by
291/// the backend.
292/// NOTE: This treats i8, i16 and i32 specially because they are common
293/// types in frontend languages.
294bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
295 switch (BitWidth) {
296 case 8:
297 case 16:
298 case 32:
299 return true;
300 default:
301 return DL.isLegalInteger(BitWidth);
302 }
303}
304
305/// Return true if it is desirable to convert an integer computation from a
306/// given bit width to a new bit width.
307/// We don't want to convert from a legal or desirable type (like i8) to an
308/// illegal type or from a smaller to a larger illegal type. A width of '1'
309/// is always treated as a desirable type because i1 is a fundamental type in
310/// IR, and there are many specialized optimizations for i1 types.
311/// Common/desirable widths are equally treated as legal to convert to, in
312/// order to open up more combining opportunities.
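///
/// For example (illustrative, assuming a datalayout where only i8/i16/i32/i64
/// are legal): converting i32 -> i16 is allowed (shrinking to a desirable
/// width), i8 -> i24 is rejected (legal source, illegal destination), and
/// i160 -> i64 is allowed while i64 -> i160 is not.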
313bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
314 unsigned ToWidth) const {
315 bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
316 bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
317
318 // Convert to desirable widths even if they are not legal types.
319 // Only shrink types, to prevent infinite loops.
320 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
321 return true;
322
323 // If the source is a legal or desirable integer type, and the result would
324 // be an illegal type, don't do the transformation.
325 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
326 return false;
327
328 // Otherwise, if both are illegal, do not increase the size of the result. We
329 // do allow things like i160 -> i64, but not i64 -> i160.
330 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
331 return false;
332
333 return true;
334}
335
336/// Return true if it is desirable to convert a computation from 'From' to 'To'.
337/// We don't want to convert from a legal to an illegal type or from a smaller
338/// to a larger illegal type. i1 is always treated as a legal type because it is
339/// a fundamental type in IR, and there are many specialized optimizations for
340/// i1 types.
341bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
342 // TODO: This could be extended to allow vectors. Datalayout changes might be
343 // needed to properly support that.
344 if (!From->isIntegerTy() || !To->isIntegerTy())
345 return false;
346
347 unsigned FromWidth = From->getPrimitiveSizeInBits();
348 unsigned ToWidth = To->getPrimitiveSizeInBits();
349 return shouldChangeType(FromWidth, ToWidth);
350}
351
352// Return true if No Signed Wrap should be maintained for I.
353// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
354// where both B and C should be ConstantInts, results in a constant that does
355// not overflow. This function only handles the Add/Sub/Mul opcodes. For
356// all other opcodes, the function conservatively returns false.
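//
// For example (illustrative): when reassociating "(X +nsw 5) +nsw 7", B = 5 and
// C = 7; since 5 + 7 does not overflow the result type, nsw may be kept on the
// combined "X +nsw 12".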
357static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
358 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
359 if (!OBO || !OBO->hasNoSignedWrap())
360 return false;
361
362 const APInt *BVal, *CVal;
363 if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
364 return false;
365
366 // We reason about Add/Sub/Mul Only.
367 bool Overflow = false;
368 switch (I.getOpcode()) {
369 case Instruction::Add:
370 (void)BVal->sadd_ov(*CVal, Overflow);
371 break;
372 case Instruction::Sub:
373 (void)BVal->ssub_ov(*CVal, Overflow);
374 break;
375 case Instruction::Mul:
376 (void)BVal->smul_ov(*CVal, Overflow);
377 break;
378 default:
379 // Conservatively return false for other opcodes.
380 return false;
381 }
382 return !Overflow;
383}
384
385static bool hasNoUnsignedWrap(BinaryOperator &I) {
386 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
387 return OBO && OBO->hasNoUnsignedWrap();
388}
389
390static bool hasNoSignedWrap(BinaryOperator &I) {
391 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
392 return OBO && OBO->hasNoSignedWrap();
393}
394
395/// Conservatively clears subclassOptionalData after a reassociation or
396/// commutation. We preserve fast-math flags when applicable, as they remain
397/// valid after these transforms.
398static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
399 FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
400 if (!FPMO) {
401 I.clearSubclassOptionalData();
402 return;
403 }
404
405 FastMathFlags FMF = I.getFastMathFlags();
406 I.clearSubclassOptionalData();
407 I.setFastMathFlags(FMF);
408}
409
410/// Combine constant operands of associative operations either before or after a
411/// cast to eliminate one of the associative operations:
412/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
413/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
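///
/// For example (illustrative IR):
///   %t = and i8 %X, 7
///   %z = zext i8 %t to i32
///   %r = and i32 %z, 3
/// becomes
///   %z = zext i8 %X to i32
///   %r = and i32 %z, 3      ; 3 = (zext 7) & 3, folded at compile time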
414static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
415 InstCombinerImpl &IC) {
416 auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
417 if (!Cast || !Cast->hasOneUse())
418 return false;
419
420 // TODO: Enhance logic for other casts and remove this check.
421 auto CastOpcode = Cast->getOpcode();
422 if (CastOpcode != Instruction::ZExt)
423 return false;
424
425 // TODO: Enhance logic for other BinOps and remove this check.
426 if (!BinOp1->isBitwiseLogicOp())
427 return false;
428
429 auto AssocOpcode = BinOp1->getOpcode();
430 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
431 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
432 return false;
433
434 Constant *C1, *C2;
435 if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
436 !match(BinOp2->getOperand(1), m_Constant(C2)))
437 return false;
438
439 // TODO: This assumes a zext cast.
440 // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
441 // to the destination type might lose bits.
442
443 // Fold the constants together in the destination type:
444 // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
445 const DataLayout &DL = IC.getDataLayout();
446 Type *DestTy = C1->getType();
447 Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
448 if (!CastC2)
449 return false;
450 Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
451 if (!FoldedC)
452 return false;
453
454 IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
455 IC.replaceOperand(*BinOp1, 1, FoldedC);
456 BinOp1->dropPoisonGeneratingFlags();
457 Cast->dropPoisonGeneratingFlags();
458 return true;
459}
460
461// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
462// inttoptr ( ptrtoint (x) ) --> x
463Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
464 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
465 if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
466 DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
467 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
468 Type *CastTy = IntToPtr->getDestTy();
469 if (PtrToInt &&
470 CastTy->getPointerAddressSpace() ==
471 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
472 DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
473 DL.getTypeSizeInBits(PtrToInt->getDestTy()))
474 return PtrToInt->getOperand(0);
475 }
476 return nullptr;
477}
478
479/// This performs a few simplifications for operators that are associative or
480/// commutative:
481///
482/// Commutative operators:
483///
484/// 1. Order operands such that they are listed from right (least complex) to
485/// left (most complex). This puts constants before unary operators before
486/// binary operators.
487///
488/// Associative operators:
489///
490/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
491/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
492///
493/// Associative and commutative operators:
494///
495/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
496/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
497/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
498/// if C1 and C2 are constants.
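///
/// For example (illustrative), transform 2 turns "(X + 3) + 7" into
/// "X + (3 + 7)" = "X + 10", and transform 6 turns "(A + 5) + (B + 9)" into
/// "(A + B) + 14".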
499bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
500 Instruction::BinaryOps Opcode = I.getOpcode();
501 bool Changed = false;
502
503 do {
504 // Order operands such that they are listed from right (least complex) to
505 // left (most complex). This puts constants before unary operators before
506 // binary operators.
507 if (I.isCommutative() && getComplexity(I.getOperand(0)) <
508 getComplexity(I.getOperand(1)))
509 Changed = !I.swapOperands();
510
511 if (I.isCommutative()) {
512 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
513 replaceOperand(I, 0, Pair->first);
514 replaceOperand(I, 1, Pair->second);
515 Changed = true;
516 }
517 }
518
519 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
520 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
521
522 if (I.isAssociative()) {
523 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
524 if (Op0 && Op0->getOpcode() == Opcode) {
525 Value *A = Op0->getOperand(0);
526 Value *B = Op0->getOperand(1);
527 Value *C = I.getOperand(1);
528
529 // Does "B op C" simplify?
530 if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
531 // It simplifies to V. Form "A op V".
532 replaceOperand(I, 0, A);
533 replaceOperand(I, 1, V);
534 bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
535 bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);
536
537 // Conservatively clear all optional flags since they may not be
538 // preserved by the reassociation. Reset nsw/nuw based on the above
539 // analysis.
540 ClearSubclassDataAfterReassociation(I);
541
542 // Note: this is only valid because SimplifyBinOp doesn't look at
543 // the operands to Op0.
544 if (IsNUW)
545 I.setHasNoUnsignedWrap(true);
546
547 if (IsNSW)
548 I.setHasNoSignedWrap(true);
549
550 Changed = true;
551 ++NumReassoc;
552 continue;
553 }
554 }
555
556 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
557 if (Op1 && Op1->getOpcode() == Opcode) {
558 Value *A = I.getOperand(0);
559 Value *B = Op1->getOperand(0);
560 Value *C = Op1->getOperand(1);
561
562 // Does "A op B" simplify?
563 if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
564 // It simplifies to V. Form "V op C".
565 replaceOperand(I, 0, V);
566 replaceOperand(I, 1, C);
567 // Conservatively clear the optional flags, since they may not be
568 // preserved by the reassociation.
569 ClearSubclassDataAfterReassociation(I);
570 Changed = true;
571 ++NumReassoc;
572 continue;
573 }
574 }
575 }
576
577 if (I.isAssociative() && I.isCommutative()) {
578 if (simplifyAssocCastAssoc(&I, *this)) {
579 Changed = true;
580 ++NumReassoc;
581 continue;
582 }
583
584 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
585 if (Op0 && Op0->getOpcode() == Opcode) {
586 Value *A = Op0->getOperand(0);
587 Value *B = Op0->getOperand(1);
588 Value *C = I.getOperand(1);
589
590 // Does "C op A" simplify?
591 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
592 // It simplifies to V. Form "V op B".
593 replaceOperand(I, 0, V);
594 replaceOperand(I, 1, B);
595 // Conservatively clear the optional flags, since they may not be
596 // preserved by the reassociation.
597 ClearSubclassDataAfterReassociation(I);
598 Changed = true;
599 ++NumReassoc;
600 continue;
601 }
602 }
603
604 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
605 if (Op1 && Op1->getOpcode() == Opcode) {
606 Value *A = I.getOperand(0);
607 Value *B = Op1->getOperand(0);
608 Value *C = Op1->getOperand(1);
609
610 // Does "C op A" simplify?
611 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
612 // It simplifies to V. Form "B op V".
613 replaceOperand(I, 0, B);
614 replaceOperand(I, 1, V);
615 // Conservatively clear the optional flags, since they may not be
616 // preserved by the reassociation.
617 ClearSubclassDataAfterReassociation(I);
618 Changed = true;
619 ++NumReassoc;
620 continue;
621 }
622 }
623
624 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
625 // if C1 and C2 are constants.
626 Value *A, *B;
627 Constant *C1, *C2, *CRes;
628 if (Op0 && Op1 &&
629 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
630 match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
631 match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
632 (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
633 bool IsNUW = hasNoUnsignedWrap(I) &&
634 hasNoUnsignedWrap(*Op0) &&
635 hasNoUnsignedWrap(*Op1);
636 BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
637 BinaryOperator::CreateNUW(Opcode, A, B) :
638 BinaryOperator::Create(Opcode, A, B);
639
640 if (isa<FPMathOperator>(NewBO)) {
641 FastMathFlags Flags = I.getFastMathFlags() &
642 Op0->getFastMathFlags() &
643 Op1->getFastMathFlags();
644 NewBO->setFastMathFlags(Flags);
645 }
646 InsertNewInstWith(NewBO, I.getIterator());
647 NewBO->takeName(Op1);
648 replaceOperand(I, 0, NewBO);
649 replaceOperand(I, 1, CRes);
650 // Conservatively clear the optional flags, since they may not be
651 // preserved by the reassociation.
652 ClearSubclassDataAfterReassociation(I);
653 if (IsNUW)
654 I.setHasNoUnsignedWrap(true);
655
656 Changed = true;
657 continue;
658 }
659 }
660
661 // No further simplifications.
662 return Changed;
663 } while (true);
664}
665
666/// Return whether "X LOp (Y ROp Z)" is always equal to
667/// "(X LOp Y) ROp (X LOp Z)".
668static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
669 Instruction::BinaryOps ROp) {
670 // X & (Y | Z) <--> (X & Y) | (X & Z)
671 // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
672 if (LOp == Instruction::And)
673 return ROp == Instruction::Or || ROp == Instruction::Xor;
674
675 // X | (Y & Z) <--> (X | Y) & (X | Z)
676 if (LOp == Instruction::Or)
677 return ROp == Instruction::And;
678
679 // X * (Y + Z) <--> (X * Y) + (X * Z)
680 // X * (Y - Z) <--> (X * Y) - (X * Z)
681 if (LOp == Instruction::Mul)
682 return ROp == Instruction::Add || ROp == Instruction::Sub;
683
684 return false;
685}
686
687/// Return whether "(X LOp Y) ROp Z" is always equal to
688/// "(X ROp Z) LOp (Y ROp Z)".
689static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
690 Instruction::BinaryOps ROp) {
691 if (Instruction::isCommutative(ROp))
692 return leftDistributesOverRight(ROp, LOp);
693
694 // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
695 return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);
696
697 // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
698 // but this requires knowing that the addition does not overflow and other
699 // such subtleties.
700}
701
702/// This function returns the identity value for the given opcode, which can be
703/// used to factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
704static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
705 if (isa<Constant>(V))
706 return nullptr;
707
708 return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
709}
710
711/// This function predicates factorization using distributive laws. By default,
712/// it just returns the 'Op' inputs. But for special-cases like
713/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
714/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
715/// allow more factorization opportunities.
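///
/// For example (illustrative): given the expression "(X << 5) + X", this
/// returns Instruction::Mul with RHS rewritten to 32, so the factorization
/// code can treat it as "(X * 32) + (X * 1)" and fold it to "X * 33".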
716static Instruction::BinaryOps
717getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
718 Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
719 assert(Op && "Expected a binary operator");
720 LHS = Op->getOperand(0);
721 RHS = Op->getOperand(1);
722 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
723 Constant *C;
724 if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
725 // X << C --> X * (1 << C)
726 RHS = ConstantFoldBinaryInstruction(
727 Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
728 assert(RHS && "Constant folding of immediate constants failed");
729 return Instruction::Mul;
730 }
731 // TODO: We can add other conversions e.g. shr => div etc.
732 }
733 if (Instruction::isBitwiseLogicOp(TopOpcode)) {
734 if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
735 match(Op, m_LShr(m_NonNegative(), m_Value()))) {
736 // lshr nneg C, X --> ashr nneg C, X
737 return Instruction::AShr;
738 }
739 }
740 return Op->getOpcode();
741}
742
743/// This tries to simplify binary operations by factorizing out common terms
744/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
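///
/// For example (illustrative IR):
///   %m1 = mul i32 %A, %B
///   %m2 = mul i32 %A, %C
///   %r  = add i32 %m1, %m2
/// becomes
///   %s = add i32 %B, %C
///   %r = mul i32 %A, %s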
745static Value *tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ,
746 InstCombiner::BuilderTy &Builder,
747 Instruction::BinaryOps InnerOpcode, Value *A,
748 Value *B, Value *C, Value *D) {
749 assert(A && B && C && D && "All values must be provided");
750
751 Value *V = nullptr;
752 Value *RetVal = nullptr;
753 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
754 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
755
756 // Does "X op' Y" always equal "Y op' X"?
757 bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
758
759 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
760 if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
761 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
762 // commutative case, "(A op' B) op (C op' A)"?
763 if (A == C || (InnerCommutative && A == D)) {
764 if (A != C)
765 std::swap(C, D);
766 // Consider forming "A op' (B op D)".
767 // If "B op D" simplifies then it can be formed with no cost.
768 V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
769
770 // If "B op D" doesn't simplify then only go on if one of the existing
771 // operations "A op' B" and "C op' D" will be zapped as no longer used.
772 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
773 V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
774 if (V)
775 RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
776 }
777 }
778
779 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
780 if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
781 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
782 // commutative case, "(A op' B) op (B op' D)"?
783 if (B == D || (InnerCommutative && B == C)) {
784 if (B != D)
785 std::swap(C, D);
786 // Consider forming "(A op C) op' B".
787 // If "A op C" simplifies then it can be formed with no cost.
788 V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
789
790 // If "A op C" doesn't simplify then only go on if one of the existing
791 // operations "A op' B" and "C op' D" will be zapped as no longer used.
792 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
793 V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
794 if (V)
795 RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
796 }
797 }
798
799 if (!RetVal)
800 return nullptr;
801
802 ++NumFactor;
803 RetVal->takeName(&I);
804
805 // Try to add no-overflow flags to the final value.
806 if (isa<BinaryOperator>(RetVal)) {
807 bool HasNSW = false;
808 bool HasNUW = false;
809 if (isa<OverflowingBinaryOperator>(&I)) {
810 HasNSW = I.hasNoSignedWrap();
811 HasNUW = I.hasNoUnsignedWrap();
812 }
813 if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
814 HasNSW &= LOBO->hasNoSignedWrap();
815 HasNUW &= LOBO->hasNoUnsignedWrap();
816 }
817
818 if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
819 HasNSW &= ROBO->hasNoSignedWrap();
820 HasNUW &= ROBO->hasNoUnsignedWrap();
821 }
822
823 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
824 // We can propagate 'nsw' if we know that
825 // %Y = mul nsw i16 %X, C
826 // %Z = add nsw i16 %Y, %X
827 // =>
828 // %Z = mul nsw i16 %X, C+1
829 //
830 // iff C+1 isn't INT_MIN
831 const APInt *CInt;
832 if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
833 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
834
835 // nuw can be propagated with any constant or nuw value.
836 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
837 }
838 }
839 return RetVal;
840}
841
842// If `I` has one Const operand and the other matches `(ctpop (not x))`,
843// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
844// This is only useful if the new subtract can fold, so we only handle the
845// following cases:
846// 1) (add/sub/disjoint_or C, (ctpop (not x))
847// -> (add/sub/disjoint_or C', (ctpop x))
848// 2) (cmp pred C, (ctpop (not x))
849// -> (cmp pred C', (ctpop x))
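//
// For example (illustrative, i32): since ctpop(~x) == 32 - ctpop(x),
//   add i32 10, (ctpop (xor i32 %x, -1))  -->  sub i32 42, (ctpop i32 %x)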
850Instruction *InstCombinerImpl::tryFoldInstWithCtpopWithNot(Instruction *I) {
851 unsigned Opc = I->getOpcode();
852 unsigned ConstIdx = 1;
853 switch (Opc) {
854 default:
855 return nullptr;
856 // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
857 // We can fold the BitWidth(x) with add/sub/icmp as long the other operand
858 // is constant.
859 case Instruction::Sub:
860 ConstIdx = 0;
861 break;
862 case Instruction::ICmp:
863 // Signed predicates aren't correct in some edge cases (e.g. for i2 types).
864 // Also, since (ctpop x) is known [0, log2(BitWidth(x))], almost all signed
865 // comparisons against it are simplified to unsigned.
866 if (cast<ICmpInst>(I)->isSigned())
867 return nullptr;
868 break;
869 case Instruction::Or:
870 if (!match(I, m_DisjointOr(m_Value(), m_Value())))
871 return nullptr;
872 [[fallthrough]];
873 case Instruction::Add:
874 break;
875 }
876
877 Value *Op;
878 // Find ctpop.
879 if (!match(I->getOperand(1 - ConstIdx),
881 m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(Op)))))
882 return nullptr;
882
883 Constant *C;
884 // Check other operand is ImmConstant.
885 if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
886 return nullptr;
887
888 Type *Ty = Op->getType();
889 Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
890 // Need extra check for icmp. Note if this check is true, it generally means
891 // the icmp will simplify to true/false.
892 if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
893 Constant *Cmp =
894 ConstantFoldCompareInstOperands(ICmpInst::ICMP_UGT, C, BitWidthC, DL);
895 if (!Cmp || !Cmp->isZeroValue())
896 return nullptr;
897 }
898
899 // Check we can invert `(not x)` for free.
900 bool Consumes = false;
901 if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
902 return nullptr;
903 Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
904 assert(NotOp != nullptr &&
905 "Desync between isFreeToInvert and getFreelyInverted");
906
907 Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);
908
909 Value *R = nullptr;
910
911 // Do the transformation here to avoid potentially introducing an infinite
912 // loop.
913 switch (Opc) {
914 case Instruction::Sub:
915 R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
916 break;
917 case Instruction::Or:
918 case Instruction::Add:
919 R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
920 break;
921 case Instruction::ICmp:
922 R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
923 CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
924 break;
925 default:
926 llvm_unreachable("Unhandled Opcode");
927 }
928 assert(R != nullptr);
929 return replaceInstUsesWith(*I, R);
930}
931
932// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
933// IFF
934// 1) the logic_shifts match
935// 2) either both binops are binops and one is `and` or
936// BinOp1 is `and`
937// (logic_shift (inv_logic_shift C1, C), C) == C1 or
938//
939// -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
940//
941// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
942// IFF
943// 1) the logic_shifts match
944// 2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
945//
946// -> (BinOp (logic_shift (BinOp X, Y)), Mask)
947//
948// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
949// IFF
950// 1) Binop1 is bitwise logical operator `and`, `or` or `xor`
951// 2) Binop2 is `not`
952//
953// -> (arithmetic_shift Binop1((not X), Y), Amt)
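//
// For example (illustrative, the second pattern above with add/shl):
//   (((X << Amt) + Mask) + (Y << Amt))  -->  (((X + Y) << Amt) + Mask)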
954
955Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
956 const DataLayout &DL = I.getDataLayout();
957 auto IsValidBinOpc = [](unsigned Opc) {
958 switch (Opc) {
959 default:
960 return false;
961 case Instruction::And:
962 case Instruction::Or:
963 case Instruction::Xor:
964 case Instruction::Add:
965 // Skip Sub as we only match constant masks which will canonicalize to use
966 // add.
967 return true;
968 }
969 };
970
971 // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
972 // constraints.
973 auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
974 unsigned ShOpc) {
975 assert(ShOpc != Instruction::AShr);
976 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
977 ShOpc == Instruction::Shl;
978 };
979
980 auto GetInvShift = [](unsigned ShOpc) {
981 assert(ShOpc != Instruction::AShr);
982 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
983 };
984
985 auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
986 unsigned ShOpc, Constant *CMask,
987 Constant *CShift) {
988 // If the BinOp1 is `and` we don't need to check the mask.
989 if (BinOpc1 == Instruction::And)
990 return true;
991
992 // For all other possible transfers we need complete distributable
993 // binop/shift (anything but `add` + `lshr`).
994 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
995 return false;
996
997 // If BinOp2 is `and`, any mask works (this only really helps for non-splat
998 // vecs, otherwise the mask will be simplified and the following check will
999 // handle it).
1000 if (BinOpc2 == Instruction::And)
1001 return true;
1002
1003 // Otherwise, need mask that meets the below requirement.
1004 // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
1005 Constant *MaskInvShift =
1006 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1007 return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
1008 CMask;
1009 };
1010
1011 auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
1012 Constant *CMask, *CShift;
1013 Value *X, *Y, *ShiftedX, *Mask, *Shift;
1014 if (!match(I.getOperand(ShOpnum),
1015 m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
1016 return nullptr;
1017 if (!match(I.getOperand(1 - ShOpnum),
1018 m_OneUse(m_BinOp(m_CombineAnd(
1019 m_OneUse(m_Shift(m_Value(X), m_Specific(Shift))),
1020 m_Value(ShiftedX)),
1021 m_Value(Mask))))
1022 return nullptr;
1023 // Make sure we are matching instruction shifts and not ConstantExpr
1024 auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
1025 auto *IX = dyn_cast<Instruction>(ShiftedX);
1026 if (!IY || !IX)
1027 return nullptr;
1028
1029 // LHS and RHS need same shift opcode
1030 unsigned ShOpc = IY->getOpcode();
1031 if (ShOpc != IX->getOpcode())
1032 return nullptr;
1033
1034 // Make sure binop is real instruction and not ConstantExpr
1035 auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
1036 if (!BO2)
1037 return nullptr;
1038
1039 unsigned BinOpc = BO2->getOpcode();
1040 // Make sure we have valid binops.
1041 if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
1042 return nullptr;
1043
1044 if (ShOpc == Instruction::AShr) {
1045 if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
1046 BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
1047 Value *NotX = Builder.CreateNot(X);
1048 Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
1049 return BinaryOperator::Create(
1050 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
1051 }
1052
1053 return nullptr;
1054 }
1055
1056 // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
1057 // distribute to drop the shift, regardless of the constants.
1058 if (BinOpc == I.getOpcode() &&
1059 IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
1060 Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
1061 Value *NewBinOp1 = Builder.CreateBinOp(
1062 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
1063 return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
1064 }
1065
1066 // Otherwise we can only distribute by constant shifting the mask, so
1067 // ensure we have constants.
1068 if (!match(Shift, m_ImmConstant(CShift)))
1069 return nullptr;
1070 if (!match(Mask, m_ImmConstant(CMask)))
1071 return nullptr;
1072
1073 // Check if we can distribute the binops.
1074 if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1075 return nullptr;
1076
1077 Constant *NewCMask =
1078 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1079 Value *NewBinOp2 = Builder.CreateBinOp(
1080 static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
1081 Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
1082 return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
1083 NewBinOp1, CShift);
1084 };
1085
1086 if (Instruction *R = MatchBinOp(0))
1087 return R;
1088 return MatchBinOp(1);
1089}
1090
1091// (Binop (zext C), (select C, T, F))
1092// -> (select C, (binop 1, T), (binop 0, F))
1093//
1094// (Binop (sext C), (select C, T, F))
1095// -> (select C, (binop -1, T), (binop 0, F))
1096//
1097// Attempt to simplify binary operations into a select with folded args, when
1098// one operand of the binop is a select instruction and the other operand is a
1099// zext/sext extension, whose value is the select condition.
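//
// For example (illustrative), with %e = zext i1 %c to i32:
//   add (zext i1 %c), (select i1 %c, i32 %t, i32 %f)
//     --> select i1 %c, (add 1, %t), (add 0, %f)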
1100Instruction *
1101InstCombinerImpl::foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I) {
1102 // TODO: this simplification may be extended to any speculatable instruction,
1103 // not just binops, and would possibly be handled better in FoldOpIntoSelect.
1104 Instruction::BinaryOps Opc = I.getOpcode();
1105 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1106 Value *A, *CondVal, *TrueVal, *FalseVal;
1107 Value *CastOp;
1108
1109 auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
1110 return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
1111 A->getType()->getScalarSizeInBits() == 1 &&
1112 match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
1113 m_Value(FalseVal)));
1114 };
1115
1116 // Make sure one side of the binop is a select instruction, and the other is a
1117 // zero/sign extension operating on an i1.
1118 if (MatchSelectAndCast(LHS, RHS))
1119 CastOp = LHS;
1120 else if (MatchSelectAndCast(RHS, LHS))
1121 CastOp = RHS;
1122 else
1123 return nullptr;
1124
1126 ? nullptr
1127 : cast<SelectInst>(CastOp == LHS ? RHS : LHS);
1128
1129 auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
1130 bool IsCastOpRHS = (CastOp == RHS);
1131 bool IsZExt = isa<ZExtInst>(CastOp);
1132 Constant *C;
1133
1134 if (IsTrueArm) {
1135 C = Constant::getNullValue(V->getType());
1136 } else if (IsZExt) {
1137 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1138 C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
1139 } else {
1140 C = Constant::getAllOnesValue(V->getType());
1141 }
1142
1143 return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
1144 : Builder.CreateBinOp(Opc, C, V);
1145 };
1146
1147 // If the value used in the zext/sext is the select condition, or the
1148 // negation of the select condition, the binop can be simplified.
1149 if (CondVal == A) {
1150 Value *NewTrueVal = NewFoldedConst(false, TrueVal);
1151 return SelectInst::Create(CondVal, NewTrueVal,
1152 NewFoldedConst(true, FalseVal), "", nullptr, SI);
1153 }
1154
1155 if (match(A, m_Not(m_Specific(CondVal)))) {
1156 Value *NewTrueVal = NewFoldedConst(true, TrueVal);
1157 return SelectInst::Create(CondVal, NewTrueVal,
1158 NewFoldedConst(false, FalseVal), "", nullptr, SI);
1159 }
1160
1161 return nullptr;
1162}
1163
1164Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
1165 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1166 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1167 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1168 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1169 Value *A, *B, *C, *D;
1170 Instruction::BinaryOps LHSOpcode, RHSOpcode;
1171
1172 if (Op0)
1173 LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
1174 if (Op1)
1175 RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);
1176
1177 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
1178 // a common term.
1179 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1180 if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
1181 return V;
1182
1183 // The instruction has the form "(A op' B) op (C)". Try to factorize common
1184 // term.
1185 if (Op0)
1186 if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
1187 if (Value *V =
1188 tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
1189 return V;
1190
1191 // The instruction has the form "(B) op (C op' D)". Try to factorize common
1192 // term.
1193 if (Op1)
1194 if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
1195 if (Value *V =
1196 tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
1197 return V;
1198
1199 return nullptr;
1200}
1201
1202/// This tries to simplify binary operations which some other binary operation
1203/// distributes over either by factorizing out common terms
1204/// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
1205/// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win).
1206/// Returns the simplified value, or null if it didn't simplify.
1207Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {
1208 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1209 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1210 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1211 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1212
1213 // Factorization.
1214 if (Value *R = tryFactorizationFolds(I))
1215 return R;
1216
1217 // Expansion.
1218 if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
1219 // The instruction has the form "(A op' B) op C". See if expanding it out
1220 // to "(A op C) op' (B op C)" results in simplifications.
1221 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
1222 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
1223
1224 // Disable the use of undef because it's not safe to distribute undef.
1225 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1226 Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1227 Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
1228
1229 // Do "A op C" and "B op C" both simplify?
1230 if (L && R) {
1231 // They do! Return "L op' R".
1232 ++NumExpand;
1233 C = Builder.CreateBinOp(InnerOpcode, L, R);
1234 C->takeName(&I);
1235 return C;
1236 }
1237
1238 // Does "A op C" simplify to the identity value for the inner opcode?
1239 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1240 // It does! Return "B op C".
1241 ++NumExpand;
1242 C = Builder.CreateBinOp(TopLevelOpcode, B, C);
1243 C->takeName(&I);
1244 return C;
1245 }
1246
1247 // Does "B op C" simplify to the identity value for the inner opcode?
1248 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1249 // It does! Return "A op C".
1250 ++NumExpand;
1251 C = Builder.CreateBinOp(TopLevelOpcode, A, C);
1252 C->takeName(&I);
1253 return C;
1254 }
1255 }
1256
1257 if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
1258 // The instruction has the form "A op (B op' C)". See if expanding it out
1259 // to "(A op B) op' (A op C)" results in simplifications.
1260 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
1261 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
1262
1263 // Disable the use of undef because it's not safe to distribute undef.
1264 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1265 Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
1266 Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1267
1268 // Do "A op B" and "A op C" both simplify?
1269 if (L && R) {
1270 // They do! Return "L op' R".
1271 ++NumExpand;
1272 A = Builder.CreateBinOp(InnerOpcode, L, R);
1273 A->takeName(&I);
1274 return A;
1275 }
1276
1277 // Does "A op B" simplify to the identity value for the inner opcode?
1278 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1279 // It does! Return "A op C".
1280 ++NumExpand;
1281 A = Builder.CreateBinOp(TopLevelOpcode, A, C);
1282 A->takeName(&I);
1283 return A;
1284 }
1285
1286 // Does "A op C" simplify to the identity value for the inner opcode?
1287 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1288 // It does! Return "A op B".
1289 ++NumExpand;
1290 A = Builder.CreateBinOp(TopLevelOpcode, A, B);
1291 A->takeName(&I);
1292 return A;
1293 }
1294 }
1295
1296 return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
1297}
1298
1299static std::optional<std::pair<Value *, Value *>>
1300matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {
1301 if (LHS->getParent() != RHS->getParent())
1302 return std::nullopt;
1303
1304 if (LHS->getNumIncomingValues() < 2)
1305 return std::nullopt;
1306
1307 if (!equal(LHS->blocks(), RHS->blocks()))
1308 return std::nullopt;
1309
1310 Value *L0 = LHS->getIncomingValue(0);
1311 Value *R0 = RHS->getIncomingValue(0);
1312
1313 for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
1314 Value *L1 = LHS->getIncomingValue(I);
1315 Value *R1 = RHS->getIncomingValue(I);
1316
1317 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1318 continue;
1319
1320 return std::nullopt;
1321 }
1322
1323 return std::optional(std::pair(L0, R0));
1324}
1325
1326std::optional<std::pair<Value *, Value *>>
1327InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
1328 Instruction *LHSInst = dyn_cast<Instruction>(LHS);
1329 Instruction *RHSInst = dyn_cast<Instruction>(RHS);
1330 if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
1331 return std::nullopt;
1332 switch (LHSInst->getOpcode()) {
1333 case Instruction::PHI:
1334 return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
1335 case Instruction::Select: {
1336 Value *Cond = LHSInst->getOperand(0);
1337 Value *TrueVal = LHSInst->getOperand(1);
1338 Value *FalseVal = LHSInst->getOperand(2);
1339 if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
1340 FalseVal == RHSInst->getOperand(1))
1341 return std::pair(TrueVal, FalseVal);
1342 return std::nullopt;
1343 }
1344 case Instruction::Call: {
1345 // Match min(a, b) and max(a, b)
1346 MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
1347 MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
1348 if (LHSMinMax && RHSMinMax &&
1349 LHSMinMax->getPredicate() ==
1350 ICmpInst::getSwappedPredicate(RHSMinMax->getPredicate()) &&
1351 ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
1352 LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
1353 (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
1354 LHSMinMax->getRHS() == RHSMinMax->getLHS())))
1355 return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
1356 return std::nullopt;
1357 }
1358 default:
1359 return std::nullopt;
1360 }
1361}
1362
1363Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
1364 Value *LHS,
1365 Value *RHS) {
1366 Value *A, *B, *C, *D, *E, *F;
1367 bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
1368 bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
1369 if (!LHSIsSelect && !RHSIsSelect)
1370 return nullptr;
1371
1373 ? nullptr
1374 : cast<SelectInst>(LHSIsSelect ? LHS : RHS);
1375
1376 FastMathFlags FMF;
1378 if (const auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
1379 FMF = FPOp->getFastMathFlags();
1380 Builder.setFastMathFlags(FMF);
1381 }
1382
1383 Instruction::BinaryOps Opcode = I.getOpcode();
1384 SimplifyQuery Q = SQ.getWithInstruction(&I);
1385
1386 Value *Cond, *True = nullptr, *False = nullptr;
1387
1388 // Special-case for add/negate combination. Replace the zero in the negation
1389 // with the trailing add operand:
1390 // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
1391 // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
1392 auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
1393 // We need an 'add' and exactly 1 arm of the select to have been simplified.
1394 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1395 return nullptr;
1396 Value *N;
1397 if (True && match(FVal, m_Neg(m_Value(N)))) {
1398 Value *Sub = Builder.CreateSub(Z, N);
1399 return Builder.CreateSelect(Cond, True, Sub, I.getName(), SI);
1400 }
1401 if (False && match(TVal, m_Neg(m_Value(N)))) {
1402 Value *Sub = Builder.CreateSub(Z, N);
1403 return Builder.CreateSelect(Cond, Sub, False, I.getName(), SI);
1404 }
1405 return nullptr;
1406 };
1407
1408 if (LHSIsSelect && RHSIsSelect && A == D) {
1409 // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
1410 Cond = A;
1411 True = simplifyBinOp(Opcode, B, E, FMF, Q);
1412 False = simplifyBinOp(Opcode, C, F, FMF, Q);
1413
1414 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1415 if (False && !True)
1416 True = Builder.CreateBinOp(Opcode, B, E);
1417 else if (True && !False)
1418 False = Builder.CreateBinOp(Opcode, C, F);
1419 }
1420 } else if (LHSIsSelect && LHS->hasOneUse()) {
1421 // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
1422 Cond = A;
1423 True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
1424 False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
1425 if (Value *NewSel = foldAddNegate(B, C, RHS))
1426 return NewSel;
1427 } else if (RHSIsSelect && RHS->hasOneUse()) {
1428 // X op (D ? E : F) -> D ? (X op E) : (X op F)
1429 Cond = D;
1430 True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
1431 False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
1432 if (Value *NewSel = foldAddNegate(E, F, LHS))
1433 return NewSel;
1434 }
1435
1436 if (!True || !False)
1437 return nullptr;
1438
1439 Value *NewSI = Builder.CreateSelect(Cond, True, False, I.getName(), SI);
1440 NewSI->takeName(&I);
1441 return NewSI;
1442}
1443
1444/// Freely adapt every user of V as-if V was changed to !V.
1445/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
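///
/// For example (illustrative): if V feeds "select i1 V, %a, %b", the select's
/// arms are swapped to "select i1 V, %b, %a" so that it still computes the same
/// result once V itself is later replaced by its inversion.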
1446void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
1447 assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
1448 for (User *U : make_early_inc_range(I->users())) {
1449 if (U == IgnoredUser)
1450 continue; // Don't consider this user.
1451 switch (cast<Instruction>(U)->getOpcode()) {
1452 case Instruction::Select: {
1453 auto *SI = cast<SelectInst>(U);
1454 SI->swapValues();
1455 SI->swapProfMetadata();
1456 break;
1457 }
1458 case Instruction::Br: {
1459 auto *BI = cast<BranchInst>(U);
1460 BI->swapSuccessors(); // swaps prof metadata too
1461 if (BPI)
1462 BPI->swapSuccEdgesProbabilities(BI->getParent());
1463 break;
1464 }
1465 case Instruction::Xor:
1466 replaceInstUsesWith(cast<Instruction>(*U), I);
1467 // Add to worklist for DCE.
1468 addToWorklist(cast<Instruction>(U));
1469 break;
1470 default:
1471 llvm_unreachable("Got unexpected user - out of sync with "
1472 "canFreelyInvertAllUsersOf() ?");
1473 }
1474 }
1475
1476 // Update pre-existing debug value uses.
1477 SmallVector<DbgVariableRecord *, 4> DbgVariableRecords;
1478 llvm::findDbgValues(I, DbgVariableRecords);
1479
1480 for (DbgVariableRecord *DbgVal : DbgVariableRecords) {
1481 SmallVector<uint64_t, 1> Ops = {dwarf::DW_OP_not};
1482 for (unsigned Idx = 0, End = DbgVal->getNumVariableLocationOps();
1483 Idx != End; ++Idx)
1484 if (DbgVal->getVariableLocationOp(Idx) == I)
1485 DbgVal->setExpression(
1486 DIExpression::appendOpsToArg(DbgVal->getExpression(), Ops, Idx));
1487 }
1488}
1489
1490/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
1491/// constant zero (which is the 'negate' form).
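///
/// For example (illustrative), given "%n = sub i32 0, %x", this returns %x.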
1492Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
1493 Value *NegV;
1494 if (match(V, m_Neg(m_Value(NegV))))
1495 return NegV;
1496
1497 // Constants can be considered to be negated values if they can be folded.
1498 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
1499 return ConstantExpr::getNeg(C);
1500
1501 if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
1502 if (C->getType()->getElementType()->isIntegerTy())
1503 return ConstantExpr::getNeg(C);
1504
1505 if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
1506 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1507 Constant *Elt = CV->getAggregateElement(i);
1508 if (!Elt)
1509 return nullptr;
1510
1511 if (isa<UndefValue>(Elt))
1512 continue;
1513
1514 if (!isa<ConstantInt>(Elt))
1515 return nullptr;
1516 }
1517 return ConstantExpr::getNeg(CV);
1518 }
1519
1520 // Negate integer vector splats.
1521 if (auto *CV = dyn_cast<Constant>(V))
1522 if (CV->getType()->isVectorTy() &&
1523 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1524 return ConstantExpr::getNeg(CV);
1525
1526 return nullptr;
1527}
1528
1529// Try to fold:
1530// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1531// -> ({s|u}itofp (int_binop x, y))
1532// 2) (fp_binop ({s|u}itofp x), FpC)
1533// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1534//
1535// Assuming the sign of the cast for x/y is `OpsFromSigned`.
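//
// For example (illustrative), when the i16 add is known not to overflow:
//   fadd double (sitofp i16 %x to double), (sitofp i16 %y to double)
//     --> sitofp (add nsw i16 %x, %y) to double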
1536Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1537 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
1538 Constant *Op1FpC, SmallVectorImpl<WithCache<const Value *>> &OpsKnown) {
1539
1540 Type *FPTy = BO.getType();
1541 Type *IntTy = IntOps[0]->getType();
1542
1543 unsigned IntSz = IntTy->getScalarSizeInBits();
1544 // This is the maximum number of in-use bits in the integer for which the int -> fp
1545 // casts are exact.
1546 unsigned MaxRepresentableBits =
1547 APFloat::semanticsPrecision(FPTy->getScalarType()->getFltSemantics());
1548
1549 // Preserve the known number of leading bits. This can allow us to satisfy the
1550 // nsw/nuw checks trivially later on.
1551 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1552
1553 // NB: This only comes up if OpsFromSigned is true, so there is no need to
1554 // cache it between calls to `foldFBinOpOfIntCastsFromSign`.
1555 auto IsNonZero = [&](unsigned OpNo) -> bool {
1556 if (OpsKnown[OpNo].hasKnownBits() &&
1557 OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
1558 return true;
1559 return isKnownNonZero(IntOps[OpNo], SQ);
1560 };
1561
1562 auto IsNonNeg = [&](unsigned OpNo) -> bool {
1563 // NB: This matches the impl in ValueTracking, we just try to use cached
1564 // knownbits here. If we ever start supporting WithCache for
1565 // `isKnownNonNegative`, change this to an explicit call.
1566 return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
1567 };
1568
1569 // Check if we know for certain that ({s|u}itofp op) is exact.
1570 auto IsValidPromotion = [&](unsigned OpNo) -> bool {
1571 // Can we treat this operand as the desired sign?
1572 if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
1573 !IsNonNeg(OpNo))
1574 return false;
1575
1576 // If fp precision >= bitwidth(op) then it's exact.
1577 // NB: This is slightly conservative for `sitofp`. For signed conversion, we
1578 // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
1579 // handled specially. We can't, however, increase the bound arbitrarily for
1580 // `sitofp` as for larger sizes, it won't sign extend.
1581 if (MaxRepresentableBits < IntSz) {
1582 // Otherwise, if it's a signed cast, check that fp precision >= bitwidth(op) -
1583 // numSignBits(op).
1584 // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
1585 // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
1586 if (OpsFromSigned)
1587 NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
1588 // Finally for unsigned check that fp precision >= bitwidth(op) -
1589 // numLeadingZeros(op).
1590 else {
1591 NumUsedLeadingBits[OpNo] =
1592 IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
1593 }
1594 }
1595 // NB: We could also check if op is known to be a power of 2 or zero (which
1596 // will always be representable). It's unlikely, however, that if we are
1597 // unable to bound op in any way, we will be able to pass the overflow checks
1598 // later on.
1599
1600 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1601 return false;
1602 // Signed + Mul also requires that op is non-zero to avoid -0 cases.
1603 return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
1604 IsNonZero(OpNo);
1605 };
1606
1607 // If we have a constant rhs, see if we can losslessly convert it to an int.
1608 if (Op1FpC != nullptr) {
1609 // Signed + Mul req non-zero
1610 if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
1611 !match(Op1FpC, m_NonZeroFP()))
1612 return nullptr;
1613
1614 Constant *Op1IntC = ConstantFoldCastOperand(
1615 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1616 IntTy, DL);
1617 if (Op1IntC == nullptr)
1618 return nullptr;
1619 if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
1620 : Instruction::UIToFP,
1621 Op1IntC, FPTy, DL) != Op1FpC)
1622 return nullptr;
1623
1624 // First try to keep sign of cast the same.
1625 IntOps[1] = Op1IntC;
1626 }
1627
1628 // Ensure lhs/rhs integer types match.
1629 if (IntTy != IntOps[1]->getType())
1630 return nullptr;
1631
1632 if (Op1FpC == nullptr) {
1633 if (!IsValidPromotion(1))
1634 return nullptr;
1635 }
1636 if (!IsValidPromotion(0))
1637 return nullptr;
1638
1639 // Finally, we check that the integer version of the binop will not overflow.
1640 BinaryOperator::BinaryOps IntOpc;
1641 // Because of the precision check, we can often rule out overflows.
1642 bool NeedsOverflowCheck = true;
1643 // Try to conservatively rule out overflow based on the already done precision
1644 // checks.
1645 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1646 unsigned OverflowMaxCurBits =
1647 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1648 bool OutputSigned = OpsFromSigned;
1649 switch (BO.getOpcode()) {
1650 case Instruction::FAdd:
1651 IntOpc = Instruction::Add;
1652 OverflowMaxOutputBits += OverflowMaxCurBits;
1653 break;
1654 case Instruction::FSub:
1655 IntOpc = Instruction::Sub;
1656 OverflowMaxOutputBits += OverflowMaxCurBits;
1657 break;
1658 case Instruction::FMul:
1659 IntOpc = Instruction::Mul;
1660 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1661 break;
1662 default:
1663 llvm_unreachable("Unsupported binop");
1664 }
1665 // The precision check may have already ruled out overflow.
1666 if (OverflowMaxOutputBits < IntSz) {
1667 NeedsOverflowCheck = false;
1668 // We can bound unsigned overflow from sub to in range signed value (this is
1669 // what allows us to avoid the overflow check for sub).
1670 if (IntOpc == Instruction::Sub)
1671 OutputSigned = true;
1672 }
1673
1674 // Precision check did not rule out overflow, so need to check.
1675 // TODO: If we add support for `WithCache` in `willNotOverflow`, change
1676 // `IntOps[...]` arguments to `KnownOps[...]`.
1677 if (NeedsOverflowCheck &&
1678 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1679 return nullptr;
1680
1681 Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
1682 if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1683 IntBO->setHasNoSignedWrap(OutputSigned);
1684 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1685 }
1686 if (OutputSigned)
1687 return new SIToFPInst(IntBinOp, FPTy);
1688 return new UIToFPInst(IntBinOp, FPTy);
1689}
1690
1691// Try to fold:
1692// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1693// -> ({s|u}itofp (int_binop x, y))
1694// 2) (fp_binop ({s|u}itofp x), FpC)
1695// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
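// Illustrative sketch (hypothetical IR, not taken from a test). When the
// checks below prove both operands are exactly representable and the integer
// op cannot overflow, fold 1) turns
//   %xf = sitofp i32 %x to float
//   %yf = sitofp i32 %y to float
//   %r  = fadd float %xf, %yf
// into
//   %s = add nsw i32 %x, %y
//   %r = sitofp i32 %s to float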
1696Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1697 // Don't perform the fold on vectors, as the integer operation may be much
1698 // more expensive than the float operation in that case.
1699 if (BO.getType()->isVectorTy())
1700 return nullptr;
1701
1702 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1703 Constant *Op1FpC = nullptr;
1704 // Check for:
1705 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1706 // 2) (binop ({s|u}itofp x), FpC)
1707 if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
1708 !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
1709 return nullptr;
1710
1711 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1712 !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
1713 !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
1714 return nullptr;
1715
1716 // Cache KnownBits a bit to potentially save some analysis.
1717 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1718
1719 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1720 // different constraints depending on the sign of the cast.
1721 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1722 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1723 IntOps, Op1FpC, OpsKnown))
1724 return R;
1725 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1726 Op1FpC, OpsKnown);
1727}
1728
1729/// A binop with a constant operand and a sign-extended boolean operand may be
1730/// converted into a select of constants by applying the binary operation to
1731/// the constant with the two possible values of the extended boolean (0 or -1).
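/// Illustrative example (hypothetical IR):
///   %s = sext i1 %b to i32
///   %r = add i32 %s, 41
/// becomes
///   %r = select i1 %b, i32 40, i32 41
/// because add(-1, 41) == 40 and add(0, 41) == 41.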
1732Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1733 // TODO: Handle non-commutative binop (constant is operand 0).
1734 // TODO: Handle zext.
1735 // TODO: Peek through 'not' of cast.
1736 Value *BO0 = BO.getOperand(0);
1737 Value *BO1 = BO.getOperand(1);
1738 Value *X;
1739 Constant *C;
1740 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1741 !X->getType()->isIntOrIntVectorTy(1))
1742 return nullptr;
1743
1744 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1747 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1748 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1749 return createSelectInstWithUnknownProfile(X, TVal, FVal);
1750}
1751
1752 static Value *simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1753 bool IsTrueArm) {
1754 SmallVector<Value *> Ops;
1755 for (Value *Op : I.operands()) {
1756 Value *V = nullptr;
1757 if (Op == SI) {
1758 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1759 } else if (match(SI->getCondition(),
1760 m_SpecificICmp(IsTrueArm ? ICmpInst::ICMP_EQ
1761 : ICmpInst::ICMP_NE,
1762 m_Specific(Op), m_Value(V))) &&
1763 isGuaranteedNotToBeUndefOrPoison(V)) {
1764 // Pass
1765 } else if (match(Op, m_ZExt(m_Specific(SI->getCondition())))) {
1766 V = IsTrueArm ? ConstantInt::get(Op->getType(), 1)
1767 : ConstantInt::getNullValue(Op->getType());
1768 } else {
1769 V = Op;
1770 }
1771 Ops.push_back(V);
1772 }
1773
1774 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1775}
1776
1777 static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1778 Value *NewOp, InstCombiner &IC) {
1779 Instruction *Clone = I.clone();
1780 Clone->replaceUsesOfWith(SI, NewOp);
1782 IC.InsertNewInstBefore(Clone, I.getIterator());
1783 return Clone;
1784}
1785
1786 Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1787 bool FoldWithMultiUse,
1788 bool SimplifyBothArms) {
1789 // Don't modify shared select instructions unless FoldWithMultiUse is set.
1790 if (!SI->hasOneUser() && !FoldWithMultiUse)
1791 return nullptr;
1792
1793 Value *TV = SI->getTrueValue();
1794 Value *FV = SI->getFalseValue();
1795
1796 // Bool selects with constant operands can be folded to logical ops.
1797 if (SI->getType()->isIntOrIntVectorTy(1))
1798 return nullptr;
1799
1800 // Avoid breaking min/max reduction pattern,
1801 // which is necessary for vectorization later.
1802 if (isa<MinMaxIntrinsic>(&Op))
1803 for (Value *IntrinOp : Op.operands())
1804 if (auto *PN = dyn_cast<PHINode>(IntrinOp))
1805 for (Value *PhiOp : PN->operands())
1806 if (PhiOp == &Op)
1807 return nullptr;
1808
1809 // Test if an FCmpInst instruction is used exclusively by a select as
1810 // part of a minimum or maximum operation. If so, refrain from doing
1811 // any other folding. This helps out other analyses which understand
1812 // non-obfuscated minimum and maximum idioms. And in this case, at
1813 // least one of the comparison operands has at least one user besides
1814 // the compare (the select), which would often largely negate the
1815 // benefit of folding anyway.
1816 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1817 if (CI->hasOneUse()) {
1818 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1819 if (((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1)) &&
1820 !CI->isCommutative())
1821 return nullptr;
1822 }
1823 }
1824
1825 // Make sure that one of the select arms folds successfully.
1826 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1827 Value *NewFV =
1828 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1829 if (!NewTV && !NewFV)
1830 return nullptr;
1831
1832 if (SimplifyBothArms && !(NewTV && NewFV))
1833 return nullptr;
1834
1835 // Create an instruction for the arm that did not fold.
1836 if (!NewTV)
1837 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1838 if (!NewFV)
1839 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1840 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1841}
1842
1843 static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
1844 Value *InValue, BasicBlock *InBB,
1845 const DataLayout &DL,
1846 const SimplifyQuery SQ) {
1847 // NB: It is a precondition of this transform that the operands be
1848 // phi translatable!
1849 SmallVector<Value *> Ops;
1850 for (Value *Op : I.operands()) {
1851 if (Op == PN)
1852 Ops.push_back(InValue);
1853 else
1854 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1855 }
1856
1857 // Don't consider the simplification successful if we get back a constant
1858 // expression. That's just an instruction in hiding.
1859 // Also reject the case where we simplify back to the phi node. We wouldn't
1860 // be able to remove it in that case.
1861 Value *NewVal = simplifyInstructionWithOperands(
1862 &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1863 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1864 return NewVal;
1865
1866 // Check if incoming PHI value can be replaced with constant
1867 // based on implied condition.
1868 BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1869 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1870 if (TerminatorBI && TerminatorBI->isConditional() &&
1871 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1872 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1873 std::optional<bool> ImpliedCond = isImpliedCondition(
1874 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1875 DL, LHSIsTrue);
1876 if (ImpliedCond)
1877 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1878 }
1879
1880 return nullptr;
1881}
1882
1883/// In some cases it is beneficial to fold a select into a binary operator.
1884/// For example:
1885/// %1 = or %in, 4
1886/// %2 = select %cond, %1, %in
1887/// %3 = or %2, 1
1888/// =>
1889/// %1 = select i1 %cond, 5, 1
1890/// %2 = or %1, %in
1892 assert(Op.isAssociative() && "The operation must be associative!");
1893
1894 SelectInst *SI = dyn_cast<SelectInst>(Op.getOperand(0));
1895
1896 Constant *Const;
1897 if (!SI || !match(Op.getOperand(1), m_ImmConstant(Const)) ||
1898 !Op.hasOneUse() || !SI->hasOneUse())
1899 return nullptr;
1900
1901 Value *TV = SI->getTrueValue();
1902 Value *FV = SI->getFalseValue();
1903 Value *Input, *NewTV, *NewFV;
1904 Constant *Const2;
1905
1906 if (TV->hasOneUse() && match(TV, m_BinOp(Op.getOpcode(), m_Specific(FV),
1907 m_ImmConstant(Const2)))) {
1908 NewTV = ConstantFoldBinaryInstruction(Op.getOpcode(), Const, Const2);
1909 NewFV = Const;
1910 Input = FV;
1911 } else if (FV->hasOneUse() &&
1912 match(FV, m_BinOp(Op.getOpcode(), m_Specific(TV),
1913 m_ImmConstant(Const2)))) {
1914 NewTV = Const;
1915 NewFV = ConstantFoldBinaryInstruction(Op.getOpcode(), Const, Const2);
1916 Input = TV;
1917 } else
1918 return nullptr;
1919
1920 if (!NewTV || !NewFV)
1921 return nullptr;
1922
1923 Value *NewSI =
1924 Builder.CreateSelect(SI->getCondition(), NewTV, NewFV, "",
1925 ProfcheckDisableMetadataFixes ? nullptr : SI);
1926 return BinaryOperator::Create(Op.getOpcode(), NewSI, Input);
1927}
1928
1929 Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
1930 bool AllowMultipleUses) {
1931 unsigned NumPHIValues = PN->getNumIncomingValues();
1932 if (NumPHIValues == 0)
1933 return nullptr;
1934
1935 // We normally only transform phis with a single use. However, if a PHI has
1936 // multiple uses and they are all the same operation, we can fold *all* of the
1937 // uses into the PHI.
1938 bool OneUse = PN->hasOneUse();
1939 bool IdenticalUsers = false;
1940 if (!AllowMultipleUses && !OneUse) {
1941 // Walk the use list for the instruction, comparing them to I.
1942 for (User *U : PN->users()) {
1943 auto *UI = cast<Instruction>(U);
1944 if (UI != &I && !I.isIdenticalTo(UI))
1945 return nullptr;
1946 }
1947 // Otherwise, we can replace *all* users with the new PHI we form.
1948 IdenticalUsers = true;
1949 }
1950
1951 // Check that all operands are phi-translatable.
1952 for (Value *Op : I.operands()) {
1953 if (Op == PN)
1954 continue;
1955
1956 // Non-instructions never require phi-translation.
1957 auto *I = dyn_cast<Instruction>(Op);
1958 if (!I)
1959 continue;
1960
1961 // Phi-translate can handle phi nodes in the same block.
1962 if (isa<PHINode>(I))
1963 if (I->getParent() == PN->getParent())
1964 continue;
1965
1966 // Operand dominates the block, no phi-translation necessary.
1967 if (DT.dominates(I, PN->getParent()))
1968 continue;
1969
1970 // Not phi-translatable, bail out.
1971 return nullptr;
1972 }
1973
1974 // Check to see whether the instruction can be folded into each phi operand.
1975 // If there is one operand that does not fold, remember the BB it is in.
1976 SmallVector<Value *> NewPhiValues;
1977 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1978 bool SeenNonSimplifiedInVal = false;
1979 for (unsigned i = 0; i != NumPHIValues; ++i) {
1980 Value *InVal = PN->getIncomingValue(i);
1981 BasicBlock *InBB = PN->getIncomingBlock(i);
1982
1983 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1984 NewPhiValues.push_back(NewVal);
1985 continue;
1986 }
1987
1988 // Handle some cases that can't be fully simplified, but where we know that
1989 // the two instructions will fold into one.
1990 auto WillFold = [&]() {
1991 if (!InVal->hasUseList() || !InVal->hasOneUser())
1992 return false;
1993
1994 // icmp of ucmp/scmp with constant will fold to icmp.
1995 const APInt *Ignored;
1996 if (isa<CmpIntrinsic>(InVal) &&
1997 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1998 return true;
1999
2000 // icmp eq zext(bool), 0 will fold to !bool.
2001 if (isa<ZExtInst>(InVal) &&
2002 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
2003 match(&I,
2004 m_SpecificICmp(ICmpInst::ICMP_EQ, m_Specific(PN), m_Zero())))
2005 return true;
2006
2007 return false;
2008 };
2009
2010 if (WillFold()) {
2011 OpsToMoveUseToIncomingBB.push_back(i);
2012 NewPhiValues.push_back(nullptr);
2013 continue;
2014 }
2015
2016 if (!OneUse && !IdenticalUsers)
2017 return nullptr;
2018
2019 if (SeenNonSimplifiedInVal)
2020 return nullptr; // More than one non-simplified value.
2021 SeenNonSimplifiedInVal = true;
2022
2023 // If there is exactly one non-simplified value, we can insert a copy of the
2024 // operation in that block. However, if this is a critical edge, we would
2025 // be inserting the computation on some other paths (e.g. inside a loop).
2026 // Only do this if the pred block is unconditionally branching into the phi
2027 // block. Also, make sure that the pred block is not dead code.
2028 BranchInst *BI = dyn_cast<BranchInst>(InBB->getTerminator());
2029 if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(InBB))
2030 return nullptr;
2031
2032 NewPhiValues.push_back(nullptr);
2033 OpsToMoveUseToIncomingBB.push_back(i);
2034
2035 // Do not push the operation across a loop backedge. This could result in
2036 // an infinite combine loop, and is generally non-profitable (especially
2037 // if the operation was originally outside the loop).
2038 if (isBackEdge(InBB, PN->getParent()))
2039 return nullptr;
2040 }
2041
2042 // Clone the instruction that uses the phi node and move it into the incoming
2043 // BB because we know that the next iteration of InstCombine will simplify it.
2044 SmallDenseMap<BasicBlock *, Instruction *> Clones;
2045 for (auto OpIndex : OpsToMoveUseToIncomingBB) {
2046 Value *Op = PN->getIncomingValue(OpIndex);
2047 BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
2048
2049 Instruction *Clone = Clones.lookup(OpBB);
2050 if (!Clone) {
2051 Clone = I.clone();
2052 for (Use &U : Clone->operands()) {
2053 if (U == PN)
2054 U = Op;
2055 else
2056 U = U->DoPHITranslation(PN->getParent(), OpBB);
2057 }
2058 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
2059 Clones.insert({OpBB, Clone});
2060 // We may have speculated the instruction.
2061 Clone->dropUBImplyingAttrsAndMetadata();
2062 }
2063
2064 NewPhiValues[OpIndex] = Clone;
2065 }
2066
2067 // Okay, we can do the transformation: create the new PHI node.
2068 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
2069 InsertNewInstBefore(NewPN, PN->getIterator());
2070 NewPN->takeName(PN);
2071 NewPN->setDebugLoc(PN->getDebugLoc());
2072
2073 for (unsigned i = 0; i != NumPHIValues; ++i)
2074 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
2075
2076 if (IdenticalUsers) {
2077 // Collect and deduplicate users up-front to avoid iterator invalidation.
2078 SmallSetVector<Instruction *, 4> ToReplace;
2079 for (User *U : PN->users()) {
2080 auto *User = cast<Instruction>(U);
2081 if (User == &I)
2082 continue;
2083 ToReplace.insert(User);
2084 }
2085 for (Instruction *I : ToReplace) {
2086 replaceInstUsesWith(*I, NewPN);
2087 eraseInstFromFunction(*I);
2088 }
2089 OneUse = true;
2090 }
2091
2092 if (OneUse) {
2093 replaceAllDbgUsesWith(*PN, *NewPN, *PN, DT);
2094 }
2095 return replaceInstUsesWith(I, NewPN);
2096}
2097
2098 Instruction *InstCombinerImpl::foldBinopWithRecurrence(BinaryOperator &BO) {
2099 if (!BO.isAssociative())
2100 return nullptr;
2101
2102 // Find the interleaved binary ops.
2103 auto Opc = BO.getOpcode();
2104 auto *BO0 = dyn_cast<BinaryOperator>(BO.getOperand(0));
2105 auto *BO1 = dyn_cast<BinaryOperator>(BO.getOperand(1));
2106 if (!BO0 || !BO1 || !BO0->hasNUses(2) || !BO1->hasNUses(2) ||
2107 BO0->getOpcode() != Opc || BO1->getOpcode() != Opc ||
2108 !BO0->isAssociative() || !BO1->isAssociative() ||
2109 BO0->getParent() != BO1->getParent())
2110 return nullptr;
2111
2112 assert(BO.isCommutative() && BO0->isCommutative() && BO1->isCommutative() &&
2113 "Expected commutative instructions!");
2114
2115 // Find the matching phis, forming the recurrences.
2116 PHINode *PN0, *PN1;
2117 Value *Start0, *Step0, *Start1, *Step1;
2118 if (!matchSimpleRecurrence(BO0, PN0, Start0, Step0) || !PN0->hasOneUse() ||
2119 !matchSimpleRecurrence(BO1, PN1, Start1, Step1) || !PN1->hasOneUse() ||
2120 PN0->getParent() != PN1->getParent())
2121 return nullptr;
2122
2123 assert(PN0->getNumIncomingValues() == 2 && PN1->getNumIncomingValues() == 2 &&
2124 "Expected PHIs with two incoming values!");
2125
2126 // Convert the start and step values to constants.
2127 auto *Init0 = dyn_cast<Constant>(Start0);
2128 auto *Init1 = dyn_cast<Constant>(Start1);
2129 auto *C0 = dyn_cast<Constant>(Step0);
2130 auto *C1 = dyn_cast<Constant>(Step1);
2131 if (!Init0 || !Init1 || !C0 || !C1)
2132 return nullptr;
2133
2134 // Fold the recurrence constants.
2135 auto *Init = ConstantFoldBinaryInstruction(Opc, Init0, Init1);
2136 auto *C = ConstantFoldBinaryInstruction(Opc, C0, C1);
2137 if (!Init || !C)
2138 return nullptr;
2139
2140 // Create the reduced PHI.
2141 auto *NewPN = PHINode::Create(PN0->getType(), PN0->getNumIncomingValues(),
2142 "reduced.phi");
2143
2144 // Create the new binary op.
2145 auto *NewBO = BinaryOperator::Create(Opc, NewPN, C);
2146 if (Opc == Instruction::FAdd || Opc == Instruction::FMul) {
2147 // Intersect FMF flags for FADD and FMUL.
2148 FastMathFlags Intersect = BO0->getFastMathFlags() &
2149 BO1->getFastMathFlags() & BO.getFastMathFlags();
2150 NewBO->setFastMathFlags(Intersect);
2151 } else {
2152 OverflowTracking Flags;
2153 Flags.AllKnownNonNegative = false;
2154 Flags.AllKnownNonZero = false;
2155 Flags.mergeFlags(*BO0);
2156 Flags.mergeFlags(*BO1);
2157 Flags.mergeFlags(BO);
2158 Flags.applyFlags(*NewBO);
2159 }
2160 NewBO->takeName(&BO);
2161
2162 for (unsigned I = 0, E = PN0->getNumIncomingValues(); I != E; ++I) {
2163 auto *V = PN0->getIncomingValue(I);
2164 auto *BB = PN0->getIncomingBlock(I);
2165 if (V == Init0) {
2166 assert(((PN1->getIncomingValue(0) == Init1 &&
2167 PN1->getIncomingBlock(0) == BB) ||
2168 (PN1->getIncomingValue(1) == Init1 &&
2169 PN1->getIncomingBlock(1) == BB)) &&
2170 "Invalid incoming block!");
2171 NewPN->addIncoming(Init, BB);
2172 } else if (V == BO0) {
2173 assert(((PN1->getIncomingValue(0) == BO1 &&
2174 PN1->getIncomingBlock(0) == BB) ||
2175 (PN1->getIncomingValue(1) == BO1 &&
2176 PN1->getIncomingBlock(1) == BB)) &&
2177 "Invalid incoming block!");
2178 NewPN->addIncoming(NewBO, BB);
2179 } else
2180 llvm_unreachable("Unexpected incoming value!");
2181 }
2182
2183 LLVM_DEBUG(dbgs() << " Combined " << *PN0 << "\n " << *BO0
2184 << "\n with " << *PN1 << "\n " << *BO1
2185 << '\n');
2186
2187 // Insert the new recurrence and remove the old (dead) ones.
2188 InsertNewInstWith(NewPN, PN0->getIterator());
2189 InsertNewInstWith(NewBO, BO0->getIterator());
2190
2197
2198 return replaceInstUsesWith(BO, NewBO);
2199}
2200
2201 Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
2202 // Attempt to fold binary operators whose operands are simple recurrences.
2203 if (auto *NewBO = foldBinopWithRecurrence(BO))
2204 return NewBO;
2205
2206 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
2207 // we are guarding against replicating the binop in >1 predecessor.
2208 // This could miss matching a phi with 2 constant incoming values.
2209 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
2210 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
2211 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
2212 Phi0->getNumOperands() != Phi1->getNumOperands())
2213 return nullptr;
2214
2215 // TODO: Remove the restriction for binop being in the same block as the phis.
2216 if (BO.getParent() != Phi0->getParent() ||
2217 BO.getParent() != Phi1->getParent())
2218 return nullptr;
2219
2220 // Fold when, for every predecessor block, one of the two phis' incoming
2221 // values is the identity constant of the binary operator; the new phi then
2222 // takes the other phi's incoming value for that block.
2223 // For example:
2224 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
2225 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
2226 // %add = add i32 %phi0, %phi1
2227 // ==>
2228 // %add = phi i32 [%j, %bb0], [%i, %bb1]
2229 Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
2230 /*AllowRHSConstant*/ false);
2231 if (C) {
2232 SmallVector<Value *, 4> NewIncomingValues;
2233 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
2234 auto &Phi0Use = std::get<0>(T);
2235 auto &Phi1Use = std::get<1>(T);
2236 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
2237 return false;
2238 Value *Phi0UseV = Phi0Use.get();
2239 Value *Phi1UseV = Phi1Use.get();
2240 if (Phi0UseV == C)
2241 NewIncomingValues.push_back(Phi1UseV);
2242 else if (Phi1UseV == C)
2243 NewIncomingValues.push_back(Phi0UseV);
2244 else
2245 return false;
2246 return true;
2247 };
2248
2249 if (all_of(zip(Phi0->operands(), Phi1->operands()),
2250 CanFoldIncomingValuePair)) {
2251 PHINode *NewPhi =
2252 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
2253 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
2254 "The number of collected incoming values should equal the number "
2255 "of the original PHINode operands!");
2256 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
2257 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
2258 return NewPhi;
2259 }
2260 }
2261
2262 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2263 return nullptr;
2264
2265 // Match a pair of incoming constants for one of the predecessor blocks.
2266 BasicBlock *ConstBB, *OtherBB;
2267 Constant *C0, *C1;
2268 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2269 ConstBB = Phi0->getIncomingBlock(0);
2270 OtherBB = Phi0->getIncomingBlock(1);
2271 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2272 ConstBB = Phi0->getIncomingBlock(1);
2273 OtherBB = Phi0->getIncomingBlock(0);
2274 } else {
2275 return nullptr;
2276 }
2277 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2278 return nullptr;
2279
2280 // The block that we are hoisting to must reach here unconditionally.
2281 // Otherwise, we could be speculatively executing an expensive or
2282 // non-speculatable op.
2283 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
2284 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2285 !DT.isReachableFromEntry(OtherBB))
2286 return nullptr;
2287
2288 // TODO: This check could be tightened to only apply to binops (div/rem) that
2289 // are not safe to speculatively execute. But that could allow hoisting
2290 // potentially expensive instructions (fdiv for example).
2291 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2293 return nullptr;
2294
2295 // Fold constants for the predecessor block with constant incoming values.
2296 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2297 if (!NewC)
2298 return nullptr;
2299
2300 // Make a new binop in the predecessor block with the non-constant incoming
2301 // values.
2302 Builder.SetInsertPoint(PredBlockBranch);
2303 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2304 Phi0->getIncomingValueForBlock(OtherBB),
2305 Phi1->getIncomingValueForBlock(OtherBB));
2306 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2307 NotFoldedNewBO->copyIRFlags(&BO);
2308
2309 // Replace the binop with a phi of the new values. The old phis are dead.
2310 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2311 NewPhi->addIncoming(NewBO, OtherBB);
2312 NewPhi->addIncoming(NewC, ConstBB);
2313 return NewPhi;
2314}
2315
2316 Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
2317 bool IsOtherParamConst = isa<Constant>(I.getOperand(1));
2318
2319 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
2320 if (Instruction *NewSel =
2321 FoldOpIntoSelect(I, Sel, false, !IsOtherParamConst))
2322 return NewSel;
2323 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
2324 if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
2325 return NewPhi;
2326 }
2327 return nullptr;
2328}
2329
2330 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
2331 // If this GEP has only 0 indices, it is the same pointer as
2332 // Src. If Src is not a trivial GEP too, don't combine
2333 // the indices.
2334 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2335 !Src.hasOneUse())
2336 return false;
2337 return true;
2338}
2339
2340/// Find a constant NewC that has property:
2341/// shuffle(NewC, ShMask) = C
2342/// Returns nullptr if such a constant does not exist e.g. ShMask=<0,0> C=<1,2>
2343///
2344/// A 1-to-1 mapping is not required. Example:
2345/// ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <poison,5,6,poison>
2346 static Constant *unshuffleConstant(ArrayRef<int> ShMask, Constant *C,
2347 VectorType *NewCTy) {
2348 if (isa<ScalableVectorType>(NewCTy)) {
2349 Constant *Splat = C->getSplatValue();
2350 if (!Splat)
2351 return nullptr;
2352 return ConstantVector::getSplat(NewCTy->getElementCount(), Splat);
2353 }
2354
2355 if (cast<FixedVectorType>(NewCTy)->getNumElements() >
2356 cast<FixedVectorType>(C->getType())->getNumElements())
2357 return nullptr;
2358
2359 unsigned NewCNumElts = cast<FixedVectorType>(NewCTy)->getNumElements();
2360 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2361 SmallVector<Constant *, 16> NewVecC(NewCNumElts, PoisonScalar);
2362 unsigned NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
2363 for (unsigned I = 0; I < NumElts; ++I) {
2364 Constant *CElt = C->getAggregateElement(I);
2365 if (ShMask[I] >= 0) {
2366 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2367 Constant *NewCElt = NewVecC[ShMask[I]];
2368 // Bail out if:
2369 // 1. The constant vector contains a constant expression.
2370 // 2. The shuffle needs an element of the constant vector that can't
2371 // be mapped to a new constant vector.
2372 // 3. This is a widening shuffle that copies elements of V1 into the
2373 // extended elements (extending with poison is allowed).
2374 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2375 I >= NewCNumElts)
2376 return nullptr;
2377 NewVecC[ShMask[I]] = CElt;
2378 }
2379 }
2380 return ConstantVector::get(NewVecC);
2381}
2382
2383// Get the result of `Vector Op Splat` (or Splat Op Vector if \p SplatLHS).
2384 static Constant *constantFoldBinOpWithSplat(unsigned Opcode, Constant *Vector,
2385 Constant *Splat, bool SplatLHS,
2386 const DataLayout &DL) {
2387 ElementCount EC = cast<VectorType>(Vector->getType())->getElementCount();
2388 Constant *LHS = ConstantVector::getSplat(EC, Splat);
2389 Constant *RHS = Vector;
2390 if (!SplatLHS)
2391 std::swap(LHS, RHS);
2392 return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
2393}
2394
2395 Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
2396 if (!isa<VectorType>(Inst.getType()))
2397 return nullptr;
2398
2399 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2400 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2401 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2402 cast<VectorType>(Inst.getType())->getElementCount());
2403 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2404 cast<VectorType>(Inst.getType())->getElementCount());
2405
2406 auto foldConstantsThroughSubVectorInsertSplat =
2407 [&](Value *MaybeSubVector, Value *MaybeSplat,
2408 bool SplatLHS) -> Instruction * {
2409 Value *Idx;
2410 Constant *Splat, *SubVector, *Dest;
2411 if (!match(MaybeSplat, m_ConstantSplat(m_Constant(Splat))) ||
2412 !match(MaybeSubVector,
2413 m_VectorInsert(m_Constant(Dest), m_Constant(SubVector),
2414 m_Value(Idx))))
2415 return nullptr;
2416 SubVector =
2417 constantFoldBinOpWithSplat(Opcode, SubVector, Splat, SplatLHS, DL);
2418 Dest = constantFoldBinOpWithSplat(Opcode, Dest, Splat, SplatLHS, DL);
2419 if (!SubVector || !Dest)
2420 return nullptr;
2421 auto *InsertVector =
2422 Builder.CreateInsertVector(Dest->getType(), Dest, SubVector, Idx);
2423 return replaceInstUsesWith(Inst, InsertVector);
2424 };
2425
2426 // If one operand is a constant splat and the other operand is a
2427 // `vector.insert` where both the destination and subvector are constant,
2428 // apply the operation to both the destination and subvector, returning a new
2429 // constant `vector.insert`. This helps constant folding for scalable vectors.
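// Sketch of the intent (schematic, hypothetical operands):
//   %v = vector.insert(constant Dest, constant SubVec, Idx)
//   %r = add %v, splat(C)
// -> %r = vector.insert(Dest + splat(C), SubVec + splat(C), Idx)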
2430 if (Instruction *Folded = foldConstantsThroughSubVectorInsertSplat(
2431 /*MaybeSubVector=*/LHS, /*MaybeSplat=*/RHS, /*SplatLHS=*/false))
2432 return Folded;
2433 if (Instruction *Folded = foldConstantsThroughSubVectorInsertSplat(
2434 /*MaybeSubVector=*/RHS, /*MaybeSplat=*/LHS, /*SplatLHS=*/true))
2435 return Folded;
2436
2437 // If both operands of the binop are vector concatenations, then perform the
2438 // narrow binop on each pair of the source operands followed by concatenation
2439 // of the results.
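// For example (hypothetical fixed-width IR):
//   %l = shufflevector <2 x i32> %L0, <2 x i32> %L1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
//   %r = shufflevector <2 x i32> %R0, <2 x i32> %R1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
//   %b = add <4 x i32> %l, %r
// -> %b0 = add <2 x i32> %L0, %R0
//    %b1 = add <2 x i32> %L1, %R1
//    %b  = shufflevector <2 x i32> %b0, <2 x i32> %b1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>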
2440 Value *L0, *L1, *R0, *R1;
2441 ArrayRef<int> Mask;
2442 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2443 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2444 LHS->hasOneUse() && RHS->hasOneUse() &&
2445 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2446 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2447 // This transform does not have the speculative execution constraint as
2448 // below because the shuffle is a concatenation. The new binops are
2449 // operating on exactly the same elements as the existing binop.
2450 // TODO: We could ease the mask requirement to allow different undef lanes,
2451 // but that requires an analysis of the binop-with-undef output value.
2452 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2453 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2454 BO->copyIRFlags(&Inst);
2455 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2456 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2457 BO->copyIRFlags(&Inst);
2458 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2459 }
2460
2461 auto createBinOpReverse = [&](Value *X, Value *Y) {
2462 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2463 if (auto *BO = dyn_cast<BinaryOperator>(V))
2464 BO->copyIRFlags(&Inst);
2465 Module *M = Inst.getModule();
2466 Function *F = Intrinsic::getOrInsertDeclaration(
2467 M, Intrinsic::vector_reverse, V->getType());
2468 return CallInst::Create(F, V);
2469 };
2470
2471 // NOTE: Reverse shuffles don't require the speculative execution protection
2472 // below because they don't affect which lanes take part in the computation.
2473
2474 Value *V1, *V2;
2475 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2476 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2477 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2478 (LHS->hasOneUse() || RHS->hasOneUse() ||
2479 (LHS == RHS && LHS->hasNUses(2))))
2480 return createBinOpReverse(V1, V2);
2481
2482 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2483 if (LHS->hasOneUse() && isSplatValue(RHS))
2484 return createBinOpReverse(V1, RHS);
2485 }
2486 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2487 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2488 return createBinOpReverse(LHS, V2);
2489
2490 auto createBinOpVPReverse = [&](Value *X, Value *Y, Value *EVL) {
2491 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2492 if (auto *BO = dyn_cast<BinaryOperator>(V))
2493 BO->copyIRFlags(&Inst);
2494
2495 ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
2496 Value *AllTrueMask = Builder.CreateVectorSplat(EC, Builder.getTrue());
2497 Module *M = Inst.getModule();
2498 Function *F = Intrinsic::getOrInsertDeclaration(
2499 M, Intrinsic::experimental_vp_reverse, V->getType());
2500 return CallInst::Create(F, {V, AllTrueMask, EVL});
2501 };
2502
2503 Value *EVL;
2504 if (match(LHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2505 m_Value(V1), m_AllOnes(), m_Value(EVL)))) {
2506 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2507 if (match(RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2508 m_Value(V2), m_AllOnes(), m_Specific(EVL))) &&
2509 (LHS->hasOneUse() || RHS->hasOneUse() ||
2510 (LHS == RHS && LHS->hasNUses(2))))
2511 return createBinOpVPReverse(V1, V2, EVL);
2512
2513 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2514 if (LHS->hasOneUse() && isSplatValue(RHS))
2515 return createBinOpVPReverse(V1, RHS, EVL);
2516 }
2517 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2518 else if (isSplatValue(LHS) &&
2519 match(RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2520 m_Value(V2), m_AllOnes(), m_Value(EVL))))
2521 return createBinOpVPReverse(LHS, V2, EVL);
2522
2523 // It may not be safe to reorder shuffles and things like div, urem, etc.
2524 // because we may trap when executing those ops on unknown vector elements.
2525 // See PR20059.
2526 if (!isSafeToSpeculativelyExecute(&Inst))
2527 return nullptr;
2528
2529 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2530 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2531 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2532 BO->copyIRFlags(&Inst);
2533 return new ShuffleVectorInst(XY, M);
2534 };
2535
2536 // If both arguments of the binary operation are shuffles that use the same
2537 // mask and shuffle within a single vector, move the shuffle after the binop.
2538 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2539 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2540 V1->getType() == V2->getType() &&
2541 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2542 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2543 return createBinOpShuffle(V1, V2, Mask);
2544 }
2545
2546 // If both arguments of a commutative binop are select-shuffles that use the
2547 // same mask with commuted operands, the shuffles are unnecessary.
2548 if (Inst.isCommutative() &&
2549 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2550 match(RHS,
2551 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2552 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2553 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2554 // TODO: Allow shuffles that contain undefs in the mask?
2555 // That is legal, but it reduces undef knowledge.
2556 // TODO: Allow arbitrary shuffles by shuffling after binop?
2557 // That might be legal, but we have to deal with poison.
2558 if (LShuf->isSelect() &&
2559 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2560 RShuf->isSelect() &&
2561 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2562 // Example:
2563 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2564 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2565 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2566 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2567 NewBO->copyIRFlags(&Inst);
2568 return NewBO;
2569 }
2570 }
2571
2572 // If one argument is a shuffle within one vector and the other is a constant,
2573 // try moving the shuffle after the binary operation. This canonicalization
2574 // intends to move shuffles closer to other shuffles and binops closer to
2575 // other binops, so they can be folded. It may also enable demanded elements
2576 // transforms.
2577 Constant *C;
2578 if (match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Poison(),
2579 m_Mask(Mask))),
2580 m_ImmConstant(C)))) {
2581 assert(Inst.getType()->getScalarType() == V1->getType()->getScalarType() &&
2582 "Shuffle should not change scalar type");
2583
2584 bool ConstOp1 = isa<Constant>(RHS);
2585 if (Constant *NewC =
2586 unshuffleConstant(Mask, C, cast<VectorType>(V1->getType()))) {
2587 // For fixed vectors, lanes of NewC not used by the shuffle will be poison
2588 // which will cause UB for div/rem. Mask them with a safe constant.
2589 if (isa<FixedVectorType>(V1->getType()) && Inst.isIntDivRem())
2590 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2591
2592 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2593 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2594 Value *NewLHS = ConstOp1 ? V1 : NewC;
2595 Value *NewRHS = ConstOp1 ? NewC : V1;
2596 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2597 }
2598 }
2599
2600 // Try to reassociate to sink a splat shuffle after a binary operation.
2601 if (Inst.isAssociative() && Inst.isCommutative()) {
2602 // Canonicalize shuffle operand as LHS.
2603 if (isa<ShuffleVectorInst>(RHS))
2604 std::swap(LHS, RHS);
2605
2606 Value *X;
2607 ArrayRef<int> MaskC;
2608 int SplatIndex;
2609 Value *Y, *OtherOp;
2610 if (!match(LHS,
2611 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2612 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2613 X->getType() != Inst.getType() ||
2614 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2615 return nullptr;
2616
2617 // FIXME: This may not be safe if the analysis allows undef elements. By
2618 // moving 'Y' before the splat shuffle, we are implicitly assuming
2619 // that it is not undef/poison at the splat index.
2620 if (isSplatValue(OtherOp, SplatIndex)) {
2621 std::swap(Y, OtherOp);
2622 } else if (!isSplatValue(Y, SplatIndex)) {
2623 return nullptr;
2624 }
2625
2626 // X and Y are splatted values, so perform the binary operation on those
2627 // values followed by a splat followed by the 2nd binary operation:
2628 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2629 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2630 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2631 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2632 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2633
2634 // Intersect FMF on both new binops. Other (poison-generating) flags are
2635 // dropped to be safe.
2636 if (isa<FPMathOperator>(R)) {
2637 R->copyFastMathFlags(&Inst);
2638 R->andIRFlags(RHS);
2639 }
2640 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2641 NewInstBO->copyIRFlags(R);
2642 return R;
2643 }
2644
2645 return nullptr;
2646}
2647
2648/// Try to narrow the width of a binop if at least 1 operand is an extend of
2649 /// a value. This requires a potentially expensive known bits check to make
2650/// sure the narrow op does not overflow.
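/// Illustrative example (hypothetical IR), valid only when the known-bits
/// check proves the narrow add cannot wrap:
///   %a = zext i8 %x to i32
///   %b = zext i8 %y to i32
///   %r = add i32 %a, %b
/// -->
///   %n = add nuw i8 %x, %y
///   %r = zext i8 %n to i32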
2651Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2652 // We need at least one extended operand.
2653 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2654
2655 // If this is a sub, we swap the operands since we always want an extension
2656 // on the RHS. The LHS can be an extension or a constant.
2657 if (BO.getOpcode() == Instruction::Sub)
2658 std::swap(Op0, Op1);
2659
2660 Value *X;
2661 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2662 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2663 return nullptr;
2664
2665 // If both operands are the same extension from the same source type and we
2666 // can eliminate at least one (hasOneUse), this might work.
2667 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2668 Value *Y;
2669 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2670 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2671 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2672 // If that did not match, see if we have a suitable constant operand.
2673 // Truncating and extending must produce the same constant.
2674 Constant *WideC;
2675 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2676 return nullptr;
2677 Constant *NarrowC = getLosslessInvCast(WideC, X->getType(), CastOpc, DL);
2678 if (!NarrowC)
2679 return nullptr;
2680 Y = NarrowC;
2681 }
2682
2683 // Swap back now that we found our operands.
2684 if (BO.getOpcode() == Instruction::Sub)
2685 std::swap(X, Y);
2686
2687 // Both operands have narrow versions. Last step: the math must not overflow
2688 // in the narrow width.
2689 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2690 return nullptr;
2691
2692 // bo (ext X), (ext Y) --> ext (bo X, Y)
2693 // bo (ext X), C --> ext (bo X, C')
2694 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2695 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2696 if (IsSext)
2697 NewBinOp->setHasNoSignedWrap();
2698 else
2699 NewBinOp->setHasNoUnsignedWrap();
2700 }
2701 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2702}
2703
2704/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2705/// transform.
2710
2711/// Thread a GEP operation with constant indices through the constant true/false
2712/// arms of a select.
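/// Illustrative example (hypothetical globals @a/@b):
///   %p = select i1 %c, ptr @a, ptr @b
///   %g = getelementptr inbounds i32, ptr %p, i64 1
/// -->
///   %g = select i1 %c, ptr getelementptr inbounds (i32, ptr @a, i64 1),
///                      ptr getelementptr inbounds (i32, ptr @b, i64 1)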
2713 static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
2714 InstCombiner::BuilderTy &Builder) {
2715 if (!GEP.hasAllConstantIndices())
2716 return nullptr;
2717
2718 Instruction *Sel;
2719 Value *Cond;
2720 Constant *TrueC, *FalseC;
2721 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2722 !match(Sel,
2723 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2724 return nullptr;
2725
2726 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2727 // Propagate 'inbounds' and metadata from existing instructions.
2728 // Note: using IRBuilder to create the constants for efficiency.
2729 SmallVector<Value *, 4> IndexC(GEP.indices());
2730 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2731 Type *Ty = GEP.getSourceElementType();
2732 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2733 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2734 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2735}
2736
2737// Canonicalization:
2738// gep T, (gep i8, base, C1), (Index + C2) into
2739// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
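// For example, with T = i32 (sizeof(T) = 4), C1 = 8 and C2 = 3 (hypothetical
// values):
//   gep i32, (gep i8, %base, 8), (%idx + 3)
// becomes
//   gep i32, (gep i8, %base, 20), %idx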
2740 static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
2741 GEPOperator *Src,
2742 InstCombinerImpl &IC) {
2743 if (GEP.getNumIndices() != 1)
2744 return nullptr;
2745 auto &DL = IC.getDataLayout();
2746 Value *Base;
2747 const APInt *C1;
2748 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2749 return nullptr;
2750 Value *VarIndex;
2751 const APInt *C2;
2752 Type *PtrTy = Src->getType()->getScalarType();
2753 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2754 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2755 return nullptr;
2756 if (C1->getBitWidth() != IndexSizeInBits ||
2757 C2->getBitWidth() != IndexSizeInBits)
2758 return nullptr;
2759 Type *BaseType = GEP.getSourceElementType();
2760 if (isa<ScalableVectorType>(BaseType))
2761 return nullptr;
2762 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2763 APInt NewOffset = TypeSize * *C2 + *C1;
2764 if (NewOffset.isZero() ||
2765 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2766 GEPNoWrapFlags Flags = GEPNoWrapFlags::none();
2767 if (GEP.hasNoUnsignedWrap() &&
2768 cast<GEPOperator>(Src)->hasNoUnsignedWrap() &&
2769 match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()))) {
2770 Flags |= GEPNoWrapFlags::noUnsignedWrap();
2771 if (GEP.isInBounds() && cast<GEPOperator>(Src)->isInBounds())
2772 Flags |= GEPNoWrapFlags::inBounds();
2773 }
2774
2775 Value *GEPConst =
2776 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset), "", Flags);
2777 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex, Flags);
2778 }
2779
2780 return nullptr;
2781}
2782
2783/// Combine constant offsets separated by variable offsets.
2784/// ptradd (ptradd (ptradd p, C1), x), C2 -> ptradd (ptradd p, x), C1+C2
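/// For example, with C1 = 4 and C2 = 12 (hypothetical values):
///   ptradd (ptradd (ptradd p, 4), x), 12 -> ptradd (ptradd p, x), 16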
2785 static Instruction *combineConstantOffsets(GetElementPtrInst &GEP,
2786 InstCombinerImpl &IC) {
2787 if (!GEP.hasAllConstantIndices())
2788 return nullptr;
2789
2790 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2791 SmallVector<GetElementPtrInst *> Skipped;
2792 auto *InnerGEP = dyn_cast<GetElementPtrInst>(GEP.getPointerOperand());
2793 while (true) {
2794 if (!InnerGEP)
2795 return nullptr;
2796
2797 NW = NW.intersectForReassociate(InnerGEP->getNoWrapFlags());
2798 if (InnerGEP->hasAllConstantIndices())
2799 break;
2800
2801 if (!InnerGEP->hasOneUse())
2802 return nullptr;
2803
2804 Skipped.push_back(InnerGEP);
2805 InnerGEP = dyn_cast<GetElementPtrInst>(InnerGEP->getPointerOperand());
2806 }
2807
2808 // The two constant offset GEPs are directly adjacent: Let normal offset
2809 // merging handle it.
2810 if (Skipped.empty())
2811 return nullptr;
2812
2813 // FIXME: This one-use check is not strictly necessary. Consider relaxing it
2814 // if profitable.
2815 if (!InnerGEP->hasOneUse())
2816 return nullptr;
2817
2818 // Don't bother with vector splats.
2819 Type *Ty = GEP.getType();
2820 if (InnerGEP->getType() != Ty)
2821 return nullptr;
2822
2823 const DataLayout &DL = IC.getDataLayout();
2824 APInt Offset(DL.getIndexTypeSizeInBits(Ty), 0);
2825 if (!GEP.accumulateConstantOffset(DL, Offset) ||
2826 !InnerGEP->accumulateConstantOffset(DL, Offset))
2827 return nullptr;
2828
2829 IC.replaceOperand(*Skipped.back(), 0, InnerGEP->getPointerOperand());
2830 for (GetElementPtrInst *SkippedGEP : Skipped)
2831 SkippedGEP->setNoWrapFlags(NW);
2832
2833 return IC.replaceInstUsesWith(
2834 GEP,
2835 IC.Builder.CreatePtrAdd(Skipped.front(), IC.Builder.getInt(Offset), "",
2836 NW.intersectForOffsetAdd(GEP.getNoWrapFlags())));
2837}
2838
2839 Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
2840 GEPOperator *Src) {
2841 // Combine Indices - If the source pointer to this getelementptr instruction
2842 // is a getelementptr instruction with matching element type, combine the
2843 // indices of the two getelementptr instructions into a single instruction.
2844 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2845 return nullptr;
2846
2847 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2848 return I;
2849
2850 if (auto *I = combineConstantOffsets(GEP, *this))
2851 return I;
2852
2853 if (Src->getResultElementType() != GEP.getSourceElementType())
2854 return nullptr;
2855
2856 // Fold chained GEP with constant base into single GEP:
2857 // gep i8, (gep i8, %base, C1), (select Cond, C2, C3)
2858 // -> gep i8, %base, (select Cond, C1+C2, C1+C3)
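// E.g. with C1 = 4, C2 = 8, C3 = 12 (hypothetical values):
//   gep i8, (gep i8, %base, 4), (select Cond, 8, 12)
//   -> gep i8, %base, (select Cond, 12, 16)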
2859 if (Src->hasOneUse() && GEP.getNumIndices() == 1 &&
2860 Src->getNumIndices() == 1) {
2861 Value *SrcIdx = *Src->idx_begin();
2862 Value *GEPIdx = *GEP.idx_begin();
2863 const APInt *ConstOffset, *TrueVal, *FalseVal;
2864 Value *Cond;
2865
2866 if ((match(SrcIdx, m_APInt(ConstOffset)) &&
2867 match(GEPIdx,
2868 m_Select(m_Value(Cond), m_APInt(TrueVal), m_APInt(FalseVal)))) ||
2869 (match(GEPIdx, m_APInt(ConstOffset)) &&
2870 match(SrcIdx,
2871 m_Select(m_Value(Cond), m_APInt(TrueVal), m_APInt(FalseVal))))) {
2872 auto *Select = isa<SelectInst>(GEPIdx) ? cast<SelectInst>(GEPIdx)
2873 : cast<SelectInst>(SrcIdx);
2874
2875 // Make sure the select has only one use.
2876 if (!Select->hasOneUse())
2877 return nullptr;
2878
2879 if (TrueVal->getBitWidth() != ConstOffset->getBitWidth() ||
2880 FalseVal->getBitWidth() != ConstOffset->getBitWidth())
2881 return nullptr;
2882
2883 APInt NewTrueVal = *ConstOffset + *TrueVal;
2884 APInt NewFalseVal = *ConstOffset + *FalseVal;
2885 Constant *NewTrue = ConstantInt::get(Select->getType(), NewTrueVal);
2886 Constant *NewFalse = ConstantInt::get(Select->getType(), NewFalseVal);
2887 Value *NewSelect = Builder.CreateSelect(
2888 Cond, NewTrue, NewFalse, /*Name=*/"",
2889 /*MDFrom=*/(ProfcheckDisableMetadataFixes ? nullptr : Select));
2890 GEPNoWrapFlags Flags =
2892 return replaceInstUsesWith(GEP,
2893 Builder.CreateGEP(GEP.getResultElementType(),
2894 Src->getPointerOperand(),
2895 NewSelect, "", Flags));
2896 }
2897 }
2898
2899 // Find out whether the last index in the source GEP is a sequential idx.
2900 bool EndsWithSequential = false;
2901 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2902 I != E; ++I)
2903 EndsWithSequential = I.isSequential();
2904 if (!EndsWithSequential)
2905 return nullptr;
2906
2907 // Replace: gep (gep %P, long B), long A, ...
2908 // With: T = long A+B; gep %P, T, ...
2909 Value *SO1 = Src->getOperand(Src->getNumOperands() - 1);
2910 Value *GO1 = GEP.getOperand(1);
2911
2912 // If they aren't the same type, then the input hasn't been processed
2913 // by the loop above yet (which canonicalizes sequential index types to
2914 // intptr_t). Just avoid transforming this until the input has been
2915 // normalized.
2916 if (SO1->getType() != GO1->getType())
2917 return nullptr;
2918
2919 Value *Sum =
2920 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2921 // Only do the combine when we are sure the cost after the
2922 // merge is never more than that before the merge.
2923 if (Sum == nullptr)
2924 return nullptr;
2925
2926 SmallVector<Value *, 8> Indices;
2927 Indices.append(Src->op_begin() + 1, Src->op_end() - 1);
2928 Indices.push_back(Sum);
2929 Indices.append(GEP.op_begin() + 2, GEP.op_end());
2930
2931 // Don't create GEPs with more than one non-zero index.
2932 unsigned NumNonZeroIndices = count_if(Indices, [](Value *Idx) {
2933 auto *C = dyn_cast<Constant>(Idx);
2934 return !C || !C->isNullValue();
2935 });
2936 if (NumNonZeroIndices > 1)
2937 return nullptr;
2938
2939 return replaceInstUsesWith(
2940 GEP, Builder.CreateGEP(
2941 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2943}
2944
2945 Value *InstCombinerImpl::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
2946 BuilderTy *Builder,
2947 bool &DoesConsume, unsigned Depth) {
2948 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2949 // ~(~(X)) -> X.
2950 Value *A, *B;
2951 if (match(V, m_Not(m_Value(A)))) {
2952 DoesConsume = true;
2953 return A;
2954 }
2955
2956 Constant *C;
2957 // Constants can be considered to be not'ed values.
2958 if (match(V, m_ImmConstant(C)))
2959 return ConstantExpr::getNot(C);
2960
2962 return nullptr;
2963
2964 // The rest of the cases require that we invert all uses so don't bother
2965 // doing the analysis if we know we can't use the result.
2966 if (!WillInvertAllUses)
2967 return nullptr;
2968
2969 // Compares can be inverted if all of their uses are being modified to use
2970 // the ~V.
2971 if (auto *I = dyn_cast<CmpInst>(V)) {
2972 if (Builder != nullptr)
2973 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
2974 I->getOperand(1));
2975 return NonNull;
2976 }
2977
2978 // If `V` is of the form `A + B` then `-1 - V` can be folded into
2979 // `(-1 - B) - A` if we are willing to invert all of the uses.
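// Sketch of the algebra: ~V == -1 - V == -1 - (A + B) == (-1 - B) - A, and
// -1 - B is just ~B, so one freely-invertible operand suffices.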
2980 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
2981 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2982 DoesConsume, Depth))
2983 return Builder ? Builder->CreateSub(BV, A) : NonNull;
2984 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2985 DoesConsume, Depth))
2986 return Builder ? Builder->CreateSub(AV, B) : NonNull;
2987 return nullptr;
2988 }
2989
2990 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
2991 // into `A ^ B` if we are willing to invert all of the uses.
2992 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
2993 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2994 DoesConsume, Depth))
2995 return Builder ? Builder->CreateXor(A, BV) : NonNull;
2996 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2997 DoesConsume, Depth))
2998 return Builder ? Builder->CreateXor(AV, B) : NonNull;
2999 return nullptr;
3000 }
3001
3002 // If `V` is of the form `B - A` then `-1 - V` can be folded into
3003 // `A + (-1 - B)` if we are willing to invert all of the uses.
3004 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
3005 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3006 DoesConsume, Depth))
3007 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
3008 return nullptr;
3009 }
3010
3011 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
3012 // into `A s>> B` if we are willing to invert all of the uses.
3013 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
3014 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3015 DoesConsume, Depth))
3016 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
3017 return nullptr;
3018 }
3019
3020 Value *Cond;
3021 // LogicOps are special in that we canonicalize them at the cost of an
3022 // instruction.
3023 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
3024 !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
3025 // Selects/min/max with invertible operands are freely invertible
3026 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
3027 bool LocalDoesConsume = DoesConsume;
3028 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
3029 LocalDoesConsume, Depth))
3030 return nullptr;
3031 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3032 LocalDoesConsume, Depth)) {
3033 DoesConsume = LocalDoesConsume;
3034 if (Builder != nullptr) {
3035 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3036 DoesConsume, Depth);
3037 assert(NotB != nullptr &&
3038 "Unable to build inverted value for known freely invertable op");
3039 if (auto *II = dyn_cast<IntrinsicInst>(V))
3040 return Builder->CreateBinaryIntrinsic(
3041 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
3042 return Builder->CreateSelect(
3043 Cond, NotA, NotB, "",
3045 }
3046 return NonNull;
3047 }
3048 }
3049
3050 if (PHINode *PN = dyn_cast<PHINode>(V)) {
3051 bool LocalDoesConsume = DoesConsume;
3052 SmallVector<std::pair<Value *, BasicBlock *>, 8> IncomingValues;
3053 for (Use &U : PN->operands()) {
3054 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
3055 Value *NewIncomingVal = getFreelyInvertedImpl(
3056 U.get(), /*WillInvertAllUses=*/false,
3057 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
3058 if (NewIncomingVal == nullptr)
3059 return nullptr;
3060 // Make sure that we can safely erase the original PHI node.
3061 if (NewIncomingVal == V)
3062 return nullptr;
3063 if (Builder != nullptr)
3064 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
3065 }
3066
3067 DoesConsume = LocalDoesConsume;
3068 if (Builder != nullptr) {
3070 Builder->SetInsertPoint(PN);
3071 PHINode *NewPN =
3072 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
3073 for (auto [Val, Pred] : IncomingValues)
3074 NewPN->addIncoming(Val, Pred);
3075 return NewPN;
3076 }
3077 return NonNull;
3078 }
3079
3080 if (match(V, m_SExtLike(m_Value(A)))) {
3081 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3082 DoesConsume, Depth))
3083 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
3084 return nullptr;
3085 }
3086
3087 if (match(V, m_Trunc(m_Value(A)))) {
3088 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3089 DoesConsume, Depth))
3090 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
3091 return nullptr;
3092 }
3093
3094 // De Morgan's Laws:
3095 // (~(A | B)) -> (~A & ~B)
3096 // (~(A & B)) -> (~A | ~B)
3097 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
3098 bool IsLogical, Value *A,
3099 Value *B) -> Value * {
3100 bool LocalDoesConsume = DoesConsume;
3101 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
3102 LocalDoesConsume, Depth))
3103 return nullptr;
3104 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3105 LocalDoesConsume, Depth)) {
3106 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3107 LocalDoesConsume, Depth);
3108 DoesConsume = LocalDoesConsume;
3109 if (IsLogical)
3110 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
3111 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
3112 }
3113
3114 return nullptr;
3115 };
3116
3117 if (match(V, m_Or(m_Value(A), m_Value(B))))
3118 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
3119 B);
3120
3121 if (match(V, m_And(m_Value(A), m_Value(B))))
3122 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
3123 B);
3124
3125 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
3126 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
3127 B);
3128
3129 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
3130 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
3131 B);
3132
3133 return nullptr;
3134}
3135
3136/// Return true if we should canonicalize the gep to an i8 ptradd.
3137 static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP) {
3138 Value *PtrOp = GEP.getOperand(0);
3139 Type *GEPEltType = GEP.getSourceElementType();
3140 if (GEPEltType->isIntegerTy(8))
3141 return false;
3142
3143 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
3144 // intrinsic. This has better support in BasicAA.
3145 if (GEPEltType->isScalableTy())
3146 return true;
3147
3148 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
3149 // together.
3150 if (GEP.getNumIndices() == 1 &&
3151 match(GEP.getOperand(1),
3153 m_Shl(m_Value(), m_ConstantInt())))))
3154 return true;
3155
3156 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
3157 // possibly be merged together.
3158 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
3159 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
3160 any_of(GEP.indices(), [](Value *V) {
3161 const APInt *C;
3162 return match(V, m_APInt(C)) && !C->isZero();
3163 });
3164}
3165
3166 static Instruction *foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN,
3167 IRBuilderBase &Builder) {
3168 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
3169 if (!Op1)
3170 return nullptr;
3171
3172 // Don't fold a GEP into itself through a PHI node. This can only happen
3173 // through the back-edge of a loop. Folding a GEP into itself means that
3174 // the value of the previous iteration needs to be stored in the meantime,
3175 // thus requiring an additional register variable to be live, but not
3176 // actually achieving anything (the GEP still needs to be executed once per
3177 // loop iteration).
3178 if (Op1 == &GEP)
3179 return nullptr;
3180 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
3181
3182 int DI = -1;
3183
3184 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
3185 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
3186 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
3187 Op1->getSourceElementType() != Op2->getSourceElementType())
3188 return nullptr;
3189
3190 // As for Op1 above, don't try to fold a GEP into itself.
3191 if (Op2 == &GEP)
3192 return nullptr;
3193
3194 // Keep track of the type as we walk the GEP.
3195 Type *CurTy = nullptr;
3196
3197 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
3198 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
3199 return nullptr;
3200
3201 if (Op1->getOperand(J) != Op2->getOperand(J)) {
3202 if (DI == -1) {
3203 // We have not seen any differences in the GEPs feeding the
3204 // PHI yet, so we record this one if it is allowed to be a
3205 // variable.
3206
3207 // The first two arguments can vary for any GEP, the rest have to be
3208 // static for struct slots
3209 if (J > 1) {
3210 assert(CurTy && "No current type?");
3211 if (CurTy->isStructTy())
3212 return nullptr;
3213 }
3214
3215 DI = J;
3216 } else {
3217 // The GEP is different by more than one input. While this could be
3218 // extended to support GEPs that vary by more than one variable it
3219 // doesn't make sense since it greatly increases the complexity and
3220 // would result in an R+R+R addressing mode which no backend
3221 // directly supports and would need to be broken into several
3222 // simpler instructions anyway.
3223 return nullptr;
3224 }
3225 }
3226
3227 // Sink down a layer of the type for the next iteration.
3228 if (J > 0) {
3229 if (J == 1) {
3230 CurTy = Op1->getSourceElementType();
3231 } else {
3232 CurTy =
3233 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
3234 }
3235 }
3236 }
3237
3238 NW &= Op2->getNoWrapFlags();
3239 }
3240
3241 // If not all GEPs are identical we'll have to create a new PHI node.
3242 // Check that the old PHI node has only one use so that it will get
3243 // removed.
3244 if (DI != -1 && !PN->hasOneUse())
3245 return nullptr;
3246
3247 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
3248 NewGEP->setNoWrapFlags(NW);
3249
3250 if (DI == -1) {
3251 // All the GEPs feeding the PHI are identical. Clone one down into our
3252 // BB so that it can be merged with the current GEP.
3253 } else {
3254 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
3255 // into the current block so it can be merged, and create a new PHI to
3256 // set that index.
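// Hypothetical example: if the incoming values are
//   getelementptr i32, ptr %base, i64 %i  and
//   getelementptr i32, ptr %base, i64 %j
// we clone one GEP here and feed it a new phi of %i and %j.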
3257 PHINode *NewPN;
3258 {
3259 IRBuilderBase::InsertPointGuard Guard(Builder);
3260 Builder.SetInsertPoint(PN);
3261 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
3262 PN->getNumOperands());
3263 }
3264
3265 for (auto &I : PN->operands())
3266 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
3267 PN->getIncomingBlock(I));
3268
3269 NewGEP->setOperand(DI, NewPN);
3270 }
3271
3272 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
3273 return NewGEP;
3274}
3275
3277 Value *PtrOp = GEP.getOperand(0);
3278 SmallVector<Value *, 8> Indices(GEP.indices());
3279 Type *GEPType = GEP.getType();
3280 Type *GEPEltType = GEP.getSourceElementType();
3281 if (Value *V =
3282 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
3283 SQ.getWithInstruction(&GEP)))
3284 return replaceInstUsesWith(GEP, V);
3285
3286 // For vector geps, use the generic demanded vector support.
3287 // Skip if GEP return type is scalable. The number of elements is unknown at
3288 // compile-time.
3289 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
3290 auto VWidth = GEPFVTy->getNumElements();
3291 APInt PoisonElts(VWidth, 0);
3292 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
3293 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
3294 PoisonElts)) {
3295 if (V != &GEP)
3296 return replaceInstUsesWith(GEP, V);
3297 return &GEP;
3298 }
3299 }
3300
3301 // Eliminate unneeded casts for indices, and replace indices which displace
3302 // by multiples of a zero-size type with zero.
3303 bool MadeChange = false;
3304
3305 // Index width may not be the same width as pointer width.
3306 // Data layout chooses the right type based on supported integer types.
3307 Type *NewScalarIndexTy =
3308 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
3309
3311 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
3312 ++I, ++GTI) {
3313 // Skip indices into struct types.
3314 if (GTI.isStruct())
3315 continue;
3316
3317 Type *IndexTy = (*I)->getType();
3318 Type *NewIndexType =
3319 IndexTy->isVectorTy()
3320 ? VectorType::get(NewScalarIndexTy,
3321 cast<VectorType>(IndexTy)->getElementCount())
3322 : NewScalarIndexTy;
3323
3324 // If the element type has zero size then any index over it is equivalent
3325 // to an index of zero, so replace it with zero if it is not zero already.
3326 Type *EltTy = GTI.getIndexedType();
3327 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
3328 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
3329 *I = Constant::getNullValue(NewIndexType);
3330 MadeChange = true;
3331 }
3332
3333 if (IndexTy != NewIndexType) {
3334 // If we are using a wider index than needed for this platform, shrink
3335 // it to what we need. If narrower, sign-extend it to what we need.
3336 // This explicit cast can make subsequent optimizations more obvious.
3337 if (IndexTy->getScalarSizeInBits() <
3338 NewIndexType->getScalarSizeInBits()) {
3339 if (GEP.hasNoUnsignedWrap() && GEP.hasNoUnsignedSignedWrap())
3340 *I = Builder.CreateZExt(*I, NewIndexType, "", /*IsNonNeg=*/true);
3341 else
3342 *I = Builder.CreateSExt(*I, NewIndexType);
3343 } else {
3344 *I = Builder.CreateTrunc(*I, NewIndexType, "", GEP.hasNoUnsignedWrap(),
3345 GEP.hasNoUnsignedSignedWrap());
3346 }
3347 MadeChange = true;
3348 }
3349 }
3350 if (MadeChange)
3351 return &GEP;
3352
3353 // Canonicalize constant GEPs to i8 type.
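// For example (illustrative):
//   getelementptr i32, ptr %p, i64 3
// becomes
//   getelementptr i8, ptr %p, i64 12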
3354 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
3355 APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
3356 if (GEP.accumulateConstantOffset(DL, Offset))
3357 return replaceInstUsesWith(
3358 GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), "",
3359 GEP.getNoWrapFlags()));
3360 }
3361
3363 Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
3364 Value *NewGEP =
3365 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
3366 return replaceInstUsesWith(GEP, NewGEP);
3367 }
3368
3369 // Strip trailing zero indices.
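// For example (illustrative):
//   getelementptr [4 x i32], ptr %p, i64 %i, i64 0
// becomes
//   getelementptr [4 x i32], ptr %p, i64 %i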
3370 auto *LastIdx = dyn_cast<Constant>(Indices.back());
3371 if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
3372 return replaceInstUsesWith(
3373 GEP, Builder.CreateGEP(GEP.getSourceElementType(), PtrOp,
3374 drop_end(Indices), "", GEP.getNoWrapFlags()));
3375 }
3376
3377 // Strip leading zero indices.
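// For example (illustrative):
//   getelementptr [4 x i32], ptr %p, i64 0, i64 %i
// becomes
//   getelementptr i32, ptr %p, i64 %i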
3378 auto *FirstIdx = dyn_cast<Constant>(Indices.front());
3379 if (FirstIdx && FirstIdx->isNullValue() &&
3380 !FirstIdx->getType()->isVectorTy()) {
3382 ++GTI;
3383 if (!GTI.isStruct())
3384 return replaceInstUsesWith(GEP, Builder.CreateGEP(GTI.getIndexedType(),
3385 GEP.getPointerOperand(),
3386 drop_begin(Indices), "",
3387 GEP.getNoWrapFlags()));
3388 }
3389
3390 // Scalarize vector operands; prefer splat-of-gep as the canonical form.
3391 // Note that this loses information about undef lanes; we run it after
3392 // demanded bits to partially mitigate that loss.
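// For example (illustrative): a GEP whose only vector operand is a splat
// index such as "<2 x i64> splat (i64 5)" is rewritten as a scalar GEP with
// index 5, followed by a vector splat of the resulting pointer when the GEP
// type is a vector.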
3393 if (GEPType->isVectorTy() && llvm::any_of(GEP.operands(), [](Value *Op) {
3394 return Op->getType()->isVectorTy() && getSplatValue(Op);
3395 })) {
3396 SmallVector<Value *> NewOps;
3397 for (auto &Op : GEP.operands()) {
3398 if (Op->getType()->isVectorTy())
3399 if (Value *Scalar = getSplatValue(Op)) {
3400 NewOps.push_back(Scalar);
3401 continue;
3402 }
3403 NewOps.push_back(Op);
3404 }
3405
3406 Value *Res = Builder.CreateGEP(GEP.getSourceElementType(), NewOps[0],
3407 ArrayRef(NewOps).drop_front(), GEP.getName(),
3408 GEP.getNoWrapFlags());
3409 if (!Res->getType()->isVectorTy()) {
3410 ElementCount EC = cast<VectorType>(GEPType)->getElementCount();
3411 Res = Builder.CreateVectorSplat(EC, Res);
3412 }
3413 return replaceInstUsesWith(GEP, Res);
3414 }
3415
3416 bool SeenNonZeroIndex = false;
3417 for (auto [IdxNum, Idx] : enumerate(Indices)) {
3418 auto *C = dyn_cast<Constant>(Idx);
3419 if (C && C->isNullValue())
3420 continue;
3421
3422 if (!SeenNonZeroIndex) {
3423 SeenNonZeroIndex = true;
3424 continue;
3425 }
3426
3427 // GEP has multiple non-zero indices: Split it.
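// For example (illustrative, value names hypothetical):
//   getelementptr [4 x i32], ptr %p, i64 1, i64 2
// becomes
//   %front = getelementptr [4 x i32], ptr %p, i64 1
//   %back  = getelementptr [4 x i32], ptr %front, i64 0, i64 2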
3428 ArrayRef<Value *> FrontIndices = ArrayRef(Indices).take_front(IdxNum);
3429 Value *FrontGEP =
3430 Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
3431 GEP.getName() + ".split", GEP.getNoWrapFlags());
3432
3433 SmallVector<Value *> BackIndices;
3434 BackIndices.push_back(Constant::getNullValue(NewScalarIndexTy));
3435 append_range(BackIndices, drop_begin(Indices, IdxNum));
3437 GetElementPtrInst::getIndexedType(GEPEltType, FrontIndices), FrontGEP,
3438 BackIndices, GEP.getNoWrapFlags());
3439 }
3440
3441 // Check to see if the inputs to the PHI node are getelementptr instructions.
3442 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
3443 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
3444 return replaceOperand(GEP, 0, NewPtrOp);
3445 }
3446
3447 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
3448 if (Instruction *I = visitGEPOfGEP(GEP, Src))
3449 return I;
3450
3451 if (GEP.getNumIndices() == 1) {
3452 unsigned AS = GEP.getPointerAddressSpace();
3453 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3454 DL.getIndexSizeInBits(AS)) {
3455 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3456
3457 if (TyAllocSize == 1) {
3458 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3459 // but only if the result pointer is only used as if it were an integer.
3460 // (The case where the underlying object is the same is handled by
3461 // InstSimplify.)
3462 Value *X = GEP.getPointerOperand();
3463 Value *Y;
3464 if (match(GEP.getOperand(1), m_Sub(m_PtrToIntOrAddr(m_Value(Y)),
3466 GEPType == Y->getType()) {
3467 bool HasNonAddressBits =
3468 DL.getAddressSizeInBits(AS) != DL.getPointerSizeInBits(AS);
3469 bool Changed = false;
3470 GEP.replaceUsesWithIf(Y, [&](Use &U) {
3471 bool ShouldReplace =
3472 isa<PtrToAddrInst, ICmpInst>(U.getUser()) ||
3473 (!HasNonAddressBits && isa<PtrToIntInst>(U.getUser()));
3474 Changed |= ShouldReplace;
3475 return ShouldReplace;
3476 });
3477 return Changed ? &GEP : nullptr;
3478 }
3479 } else if (auto *ExactIns =
3480 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3481 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
3482 Value *V;
3483 if (ExactIns->isExact()) {
3484 if ((has_single_bit(TyAllocSize) &&
3485 match(GEP.getOperand(1),
3486 m_Shr(m_Value(V),
3487 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3488 match(GEP.getOperand(1),
3489 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3490 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3491 GEP.getPointerOperand(), V,
3492 GEP.getNoWrapFlags());
3493 }
3494 }
3495 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3496 // Try to canonicalize a non-i8 element type to i8 if the index is an
3497 // exact instruction (div/shr). If such an instruction has a constant
3498 // RHS, we can fold the non-i8 element scale into the
3499 // div/shr (similar to the mul case, just inverted).
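// For example (illustrative), with i32 elements (alloc size 4):
//   getelementptr i32, ptr %p, i64 (lshr exact i64 %v, 3)
// becomes
//   getelementptr i8, ptr %p, i64 (lshr exact i64 %v, 1)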
3500 const APInt *C;
3501 std::optional<APInt> NewC;
3502 if (has_single_bit(TyAllocSize) &&
3503 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3504 C->uge(countr_zero(TyAllocSize)))
3505 NewC = *C - countr_zero(TyAllocSize);
3506 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3507 APInt Quot;
3508 uint64_t Rem;
3509 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3510 if (Rem == 0)
3511 NewC = Quot;
3512 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3513 APInt Quot;
3514 int64_t Rem;
3515 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3516 // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3517 if (!Quot.isAllOnes() && Rem == 0)
3518 NewC = Quot;
3519 }
3520
3521 if (NewC.has_value()) {
3522 Value *NewOp = Builder.CreateBinOp(
3523 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3524 ConstantInt::get(V->getType(), *NewC));
3525 cast<BinaryOperator>(NewOp)->setIsExact();
3526 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3527 GEP.getPointerOperand(), NewOp,
3528 GEP.getNoWrapFlags());
3529 }
3530 }
3531 }
3532 }
3533 }
3534 // We do not handle pointer-vector geps here.
3535 if (GEPType->isVectorTy())
3536 return nullptr;
3537
3538 if (!GEP.isInBounds()) {
3539 unsigned IdxWidth =
3540 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3541 APInt BasePtrOffset(IdxWidth, 0);
3542 Value *UnderlyingPtrOp =
3543 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, BasePtrOffset);
3544 bool CanBeNull, CanBeFreed;
3545 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3546 DL, CanBeNull, CanBeFreed);
3547 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3548 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3549 BasePtrOffset.isNonNegative()) {
3550 APInt AllocSize(IdxWidth, DerefBytes);
3551 if (BasePtrOffset.ule(AllocSize)) {
3553 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3554 }
3555 }
3556 }
3557 }
3558
3559 // nusw + nneg -> nuw
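// For example (illustrative): "getelementptr nusw i8, ptr %p, i64 %i" with %i
// known non-negative can additionally be marked nuw.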
3560 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3561 all_of(GEP.indices(), [&](Value *Idx) {
3562 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3563 })) {
3564 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3565 return &GEP;
3566 }
3567
3568 // These rewrites are trying to preserve inbounds/nuw attributes. So we want
3569 // to do this after having tried to derive "nuw" above.
3570 if (GEP.getNumIndices() == 1) {
3571 // Given (gep p, x+y) we want to determine the common nowrap flags for both
3572 // geps if transforming into (gep (gep p, x), y).
3573 auto GetPreservedNoWrapFlags = [&](bool AddIsNUW) {
3574 // We can preserve "inbounds nuw", "nusw nuw", and plain "nuw" if we know
3575 // that x + y does not have unsigned wrap.
3576 if (GEP.hasNoUnsignedWrap() && AddIsNUW)
3577 return GEP.getNoWrapFlags();
3578 return GEPNoWrapFlags::none();
3579 };
3580
3581 // Try to replace ADD + GEP with GEP + GEP.
3582 Value *Idx1, *Idx2;
3583 if (match(GEP.getOperand(1),
3584 m_OneUse(m_AddLike(m_Value(Idx1), m_Value(Idx2))))) {
3585 // %idx = add i64 %idx1, %idx2
3586 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3587 // as:
3588 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3589 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3590 bool NUW = match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()));
3591 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3592 auto *NewPtr =
3593 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3594 Idx1, "", NWFlags);
3595 return replaceInstUsesWith(GEP,
3596 Builder.CreateGEP(GEP.getSourceElementType(),
3597 NewPtr, Idx2, "", NWFlags));
3598 }
3599 ConstantInt *C;
3600 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAddLike(
3601 m_Value(Idx1), m_ConstantInt(C))))))) {
3602 // %add = add nsw i32 %idx1, idx2
3603 // %sidx = sext i32 %add to i64
3604 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3605 // as:
3606 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3607 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3608 bool NUW = match(GEP.getOperand(1),
3610 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3611 auto *NewPtr = Builder.CreateGEP(
3612 GEP.getSourceElementType(), GEP.getPointerOperand(),
3613 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "", NWFlags);
3614 return replaceInstUsesWith(
3615 GEP,
3616 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3617 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3618 "", NWFlags));
3619 }
3620 }
3621
3623 return R;
3624
3625 return nullptr;
3626}
3627
3629 Instruction *AI) {
3631 return true;
3632 if (auto *LI = dyn_cast<LoadInst>(V))
3633 return isa<GlobalVariable>(LI->getPointerOperand());
3634 // Two distinct allocations will never be equal.
3635 return isAllocLikeFn(V, &TLI) && V != AI;
3636}
3637
3638/// Given a call CB which uses an address UsedV, return true if we can prove the
3639/// call's only possible effect is storing to UsedV.
3640static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3641 const TargetLibraryInfo &TLI) {
3642 if (!CB.use_empty())
3643 // TODO: add recursion if returned attribute is present
3644 return false;
3645
3646 if (CB.isTerminator())
3647 // TODO: remove implementation restriction
3648 return false;
3649
3650 if (!CB.willReturn() || !CB.doesNotThrow())
3651 return false;
3652
3653 // If the only possible side effect of the call is writing to the alloca,
3654 // and the result isn't used, we can safely remove any reads implied by the
3655 // call including those which might read the alloca itself.
3656 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3657 return Dest && Dest->Ptr == UsedV;
3658}
3659
3660static std::optional<ModRefInfo>
3662 const TargetLibraryInfo &TLI, bool KnowInit) {
3664 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3665 Worklist.push_back(AI);
3667
3668 do {
3669 Instruction *PI = Worklist.pop_back_val();
3670 for (User *U : PI->users()) {
3672 switch (I->getOpcode()) {
3673 default:
3674 // Give up the moment we see something we can't handle.
3675 return std::nullopt;
3676
3677 case Instruction::AddrSpaceCast:
3678 case Instruction::BitCast:
3679 case Instruction::GetElementPtr:
3680 Users.emplace_back(I);
3681 Worklist.push_back(I);
3682 continue;
3683
3684 case Instruction::ICmp: {
3685 ICmpInst *ICI = cast<ICmpInst>(I);
3686 // We can fold eq/ne comparisons with null to false/true, respectively.
3687 // We also fold comparisons in some conditions provided the alloc has
3688 // not escaped (see isNeverEqualToUnescapedAlloc).
3689 if (!ICI->isEquality())
3690 return std::nullopt;
3691 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3692 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3693 return std::nullopt;
3694
3695 // Do not fold compares to aligned_alloc calls, as they may have to
3696 // return null in case the required alignment cannot be satisfied,
3697 // unless we can prove that both alignment and size are valid.
3698 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3699 // Check if the alignment and size of a call to aligned_alloc are valid,
3700 // that is, the alignment is a power of 2 and the size is a multiple of
3701 // the alignment.
3702 const APInt *Alignment;
3703 const APInt *Size;
3704 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3705 match(CB->getArgOperand(1), m_APInt(Size)) &&
3706 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3707 };
3708 auto *CB = dyn_cast<CallBase>(AI);
3709 LibFunc TheLibFunc;
3710 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3711 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3712 !AlignmentAndSizeKnownValid(CB))
3713 return std::nullopt;
3714 Users.emplace_back(I);
3715 continue;
3716 }
3717
3718 case Instruction::Call:
3719 // Ignore no-op and store intrinsics.
3721 switch (II->getIntrinsicID()) {
3722 default:
3723 return std::nullopt;
3724
3725 case Intrinsic::memmove:
3726 case Intrinsic::memcpy:
3727 case Intrinsic::memset: {
3729 if (MI->isVolatile())
3730 return std::nullopt;
3731 // Note: this could also be ModRef, but we can still interpret that
3732 // as just Mod in that case.
3733 ModRefInfo NewAccess =
3734 MI->getRawDest() == PI ? ModRefInfo::Mod : ModRefInfo::Ref;
3735 if ((Access & ~NewAccess) != ModRefInfo::NoModRef)
3736 return std::nullopt;
3737 Access |= NewAccess;
3738 [[fallthrough]];
3739 }
3740 case Intrinsic::assume:
3741 case Intrinsic::invariant_start:
3742 case Intrinsic::invariant_end:
3743 case Intrinsic::lifetime_start:
3744 case Intrinsic::lifetime_end:
3745 case Intrinsic::objectsize:
3746 Users.emplace_back(I);
3747 continue;
3748 case Intrinsic::launder_invariant_group:
3749 case Intrinsic::strip_invariant_group:
3750 Users.emplace_back(I);
3751 Worklist.push_back(I);
3752 continue;
3753 }
3754 }
3755
3756 if (Family && getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3757 getAllocationFamily(I, &TLI) == Family) {
3758 Users.emplace_back(I);
3759 continue;
3760 }
3761
3762 if (Family && getReallocatedOperand(cast<CallBase>(I)) == PI &&
3763 getAllocationFamily(I, &TLI) == Family) {
3764 Users.emplace_back(I);
3765 Worklist.push_back(I);
3766 continue;
3767 }
3768
3769 if (!isRefSet(Access) &&
3770 isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3772 Users.emplace_back(I);
3773 continue;
3774 }
3775
3776 return std::nullopt;
3777
3778 case Instruction::Store: {
3780 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3781 return std::nullopt;
3782 if (isRefSet(Access))
3783 return std::nullopt;
3785 Users.emplace_back(I);
3786 continue;
3787 }
3788
3789 case Instruction::Load: {
3790 LoadInst *LI = cast<LoadInst>(I);
3791 if (LI->isVolatile() || LI->getPointerOperand() != PI)
3792 return std::nullopt;
3793 if (isModSet(Access))
3794 return std::nullopt;
3796 Users.emplace_back(I);
3797 continue;
3798 }
3799 }
3800 llvm_unreachable("missing a return?");
3801 }
3802 } while (!Worklist.empty());
3803
3805 return Access;
3806}
3807
3810
3811 // If we have a malloc call which is used only in comparisons to null and in
3812 // free calls, delete those calls and replace the comparisons with true or
3813 // false as appropriate.
3814
3815 // This is based on the principle that we can substitute our own allocation
3816 // function (which will never return null) rather than relying on knowledge of
3817 // the specific function being called. In some sense this can change the
3818 // permitted outputs of a program (when we convert a malloc to an alloca, the
3819 // fact that the allocation is now on the stack is potentially visible, for
3820 // example), but we believe it does so in a permissible manner.
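// For example (illustrative):
//   %p = call ptr @malloc(i64 16)
//   %c = icmp eq ptr %p, null
//   call void @free(ptr %p)
// Both calls are removed and %c is replaced with 'false'.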
3822
3823 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3824 // before each store.
3826 std::unique_ptr<DIBuilder> DIB;
3827 if (isa<AllocaInst>(MI)) {
3828 findDbgUsers(&MI, DVRs);
3829 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3830 }
3831
3832 // Determine what getInitialValueOfAllocation would return without actually
3833 // allocating the result.
3834 bool KnowInitUndef = false;
3835 bool KnowInitZero = false;
3836 Constant *Init =
3838 if (Init) {
3839 if (isa<UndefValue>(Init))
3840 KnowInitUndef = true;
3841 else if (Init->isNullValue())
3842 KnowInitZero = true;
3843 }
3844 // The various sanitizers don't actually return undef memory, but rather
3845 // memory initialized with special forms of runtime poison
3846 auto &F = *MI.getFunction();
3847 if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
3848 F.hasFnAttribute(Attribute::SanitizeAddress))
3849 KnowInitUndef = false;
3850
3851 auto Removable =
3852 isAllocSiteRemovable(&MI, Users, TLI, KnowInitZero | KnowInitUndef);
3853 if (Removable) {
3854 for (WeakTrackingVH &User : Users) {
3855 // Lower all @llvm.objectsize and MTI calls first because they may use
3856 // a bitcast/GEP of the alloca we are removing.
3857 if (!User)
3858 continue;
3859
3861
3863 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3864 SmallVector<Instruction *> InsertedInstructions;
3865 Value *Result = lowerObjectSizeCall(
3866 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3867 for (Instruction *Inserted : InsertedInstructions)
3868 Worklist.add(Inserted);
3869 replaceInstUsesWith(*I, Result);
3871 User = nullptr; // Skip examining in the next loop.
3872 continue;
3873 }
3874 if (auto *MTI = dyn_cast<MemTransferInst>(I)) {
3875 if (KnowInitZero && isRefSet(*Removable)) {
3877 Builder.SetInsertPoint(MTI);
3878 auto *M = Builder.CreateMemSet(
3879 MTI->getRawDest(),
3880 ConstantInt::get(Type::getInt8Ty(MI.getContext()), 0),
3881 MTI->getLength(), MTI->getDestAlign());
3882 M->copyMetadata(*MTI);
3883 }
3884 }
3885 }
3886 }
3887 for (WeakTrackingVH &User : Users) {
3888 if (!User)
3889 continue;
3890
3892
3893 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
3895 ConstantInt::get(Type::getInt1Ty(C->getContext()),
3896 C->isFalseWhenEqual()));
3897 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3898 for (auto *DVR : DVRs)
3899 if (DVR->isAddressOfVariable())
3901 } else {
3902 // Casts, GEP, or anything else: we're about to delete this instruction,
3903 // so it can not have any valid uses.
3904 Constant *Replace;
3905 if (isa<LoadInst>(I)) {
3906 assert(KnowInitZero || KnowInitUndef);
3907 Replace = KnowInitUndef ? UndefValue::get(I->getType())
3908 : Constant::getNullValue(I->getType());
3909 } else
3910 Replace = PoisonValue::get(I->getType());
3911 replaceInstUsesWith(*I, Replace);
3912 }
3914 }
3915
3917 // Replace invoke with a NOP intrinsic to maintain the original CFG
3918 Module *M = II->getModule();
3919 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
3920 auto *NewII = InvokeInst::Create(
3921 F, II->getNormalDest(), II->getUnwindDest(), {}, "", II->getParent());
3922 NewII->setDebugLoc(II->getDebugLoc());
3923 }
3924
3925 // Remove debug intrinsics which describe the value contained within the
3926 // alloca. In addition to removing dbg.{declare,addr} which simply point to
3927 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
3928 //
3929 // ```
3930 // define void @foo(i32 %0) {
3931 // %a = alloca i32 ; Deleted.
3932 // store i32 %0, i32* %a
3933 // dbg.value(i32 %0, "arg0") ; Not deleted.
3934 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
3935 // call void @trivially_inlinable_no_op(i32* %a)
3936 // ret void
3937 // }
3938 // ```
3939 //
3940 // This may not be required if we stop describing the contents of allocas
3941 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
3942 // the LowerDbgDeclare utility.
3943 //
3944 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
3945 // "arg0" dbg.value may be stale after the call. However, failing to remove
3946 // the DW_OP_deref dbg.value causes large gaps in location coverage.
3947 //
3948 // FIXME: the Assignment Tracking project has now likely made this
3949 // redundant (and it's sometimes harmful).
3950 for (auto *DVR : DVRs)
3951 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3952 DVR->eraseFromParent();
3953
3954 return eraseInstFromFunction(MI);
3955 }
3956 return nullptr;
3957}
3958
3959/// Move the call to free before a NULL test.
3960///
3961/// Check if this call to free is only reached after its argument has been
3962/// tested against NULL (property 0).
3963/// If yes, it is legal to move this call in its predecessor block.
3964///
3965/// The move is performed only if the block containing the call to free
3966/// will be removed, i.e.:
3967/// 1. it has only one predecessor P, and P has two successors
3968/// 2. it contains the call, noops, and an unconditional branch
3969/// 3. its successor is the same as its predecessor's successor
3970///
3971/// Profitability is not a concern here; this function should
3972/// be called only if the caller knows this transformation would be
3973/// profitable (e.g., for code size).
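/// Illustrative CFG shape this handles:
///   pred:
///     %c = icmp eq ptr %p, null
///     br i1 %c, label %cont, label %free.bb
///   free.bb:
///     call void @free(ptr %p)
///     br label %cont
/// The call to free is moved into %pred so that %free.bb becomes empty and
/// can be removed by SimplifyCFG.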
3975 const DataLayout &DL) {
3976 Value *Op = FI.getArgOperand(0);
3977 BasicBlock *FreeInstrBB = FI.getParent();
3978 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
3979
3980 // Validate part of constraint #1: Only one predecessor
3981 // FIXME: We could allow more than one predecessor, but in that case, we
3982 // would duplicate the call to free in each predecessor and it may
3983 // not be profitable even for code size.
3984 if (!PredBB)
3985 return nullptr;
3986
3987 // Validate constraint #2: Does this block contain only the call to
3988 // free, noops, and an unconditional branch?
3989 BasicBlock *SuccBB;
3990 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
3991 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
3992 return nullptr;
3993
3994 // If there are only 2 instructions in the block at this point,
3995 // they are the call to free and the unconditional branch.
3996 // If there are more than 2 instructions, check that the others are noops,
3997 // i.e., they won't hurt the performance of the generated code.
3998 if (FreeInstrBB->size() != 2) {
3999 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
4000 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
4001 continue;
4002 auto *Cast = dyn_cast<CastInst>(&Inst);
4003 if (!Cast || !Cast->isNoopCast(DL))
4004 return nullptr;
4005 }
4006 }
4007 // Validate the rest of constraint #1 by matching on the pred branch.
4008 Instruction *TI = PredBB->getTerminator();
4009 BasicBlock *TrueBB, *FalseBB;
4010 CmpPredicate Pred;
4011 if (!match(TI, m_Br(m_ICmp(Pred,
4013 m_Specific(Op->stripPointerCasts())),
4014 m_Zero()),
4015 TrueBB, FalseBB)))
4016 return nullptr;
4017 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
4018 return nullptr;
4019
4020 // Validate constraint #3: Ensure the null case just falls through.
4021 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
4022 return nullptr;
4023 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
4024 "Broken CFG: missing edge from predecessor to successor");
4025
4026 // At this point, we know that everything in FreeInstrBB can be moved
4027 // before TI.
4028 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
4029 if (&Instr == FreeInstrBBTerminator)
4030 break;
4031 Instr.moveBeforePreserving(TI->getIterator());
4032 }
4033 assert(FreeInstrBB->size() == 1 &&
4034 "Only the branch instruction should remain");
4035
4036 // Now that we've moved the call to free before the NULL check, we have to
4037 // remove any attributes on its parameter that imply it's non-null, because
4038 // those attributes might have only been valid because of the NULL check, and
4039 // we can get miscompiles if we keep them. This is conservative if non-null is
4040 // also implied by something other than the NULL check, but it's guaranteed to
4041 // be correct, and the conservativeness won't matter in practice, since the
4042 // attributes are irrelevant for the call to free itself and the pointer
4043 // shouldn't be used after the call.
4044 AttributeList Attrs = FI.getAttributes();
4045 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
4046 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
4047 if (Dereferenceable.isValid()) {
4048 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
4049 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
4050 Attribute::Dereferenceable);
4051 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
4052 }
4053 FI.setAttributes(Attrs);
4054
4055 return &FI;
4056}
4057
4059 // free undef -> unreachable.
4060 if (isa<UndefValue>(Op)) {
4061 // Leave a marker since we can't modify the CFG here.
4063 return eraseInstFromFunction(FI);
4064 }
4065
4066 // If we have 'free null' delete the instruction. This can happen in stl code
4067 // when lots of inlining happens.
4069 return eraseInstFromFunction(FI);
4070
4071 // If we had free(realloc(...)) with no intervening uses, then eliminate the
4072 // realloc() entirely.
4074 if (CI && CI->hasOneUse())
4075 if (Value *ReallocatedOp = getReallocatedOperand(CI))
4076 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
4077
4078 // If we optimize for code size, try to move the call to free before the null
4079 // test so that SimplifyCFG can remove the empty block and dead code
4080 // elimination can remove the branch. I.e., this helps to turn something like:
4081 // if (foo) free(foo);
4082 // into
4083 // free(foo);
4084 //
4085 // Note that we can only do this for 'free' and not for any flavor of
4086 // 'operator delete'; there is no 'operator delete' symbol for which we are
4087 // permitted to invent a call, even if we're passing in a null pointer.
4088 if (MinimizeSize) {
4089 LibFunc Func;
4090 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
4092 return I;
4093 }
4094
4095 return nullptr;
4096}
4097
4099 Value *RetVal = RI.getReturnValue();
4100 if (!RetVal)
4101 return nullptr;
4102
4103 Function *F = RI.getFunction();
4104 Type *RetTy = RetVal->getType();
4105 if (RetTy->isPointerTy()) {
4106 bool HasDereferenceable =
4107 F->getAttributes().getRetDereferenceableBytes() > 0;
4108 if (F->hasRetAttribute(Attribute::NonNull) ||
4109 (HasDereferenceable &&
4111 if (Value *V = simplifyNonNullOperand(RetVal, HasDereferenceable))
4112 return replaceOperand(RI, 0, V);
4113 }
4114 }
4115
4116 if (!AttributeFuncs::isNoFPClassCompatibleType(RetTy))
4117 return nullptr;
4118
4119 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
4120 if (ReturnClass == fcNone)
4121 return nullptr;
4122
4123 KnownFPClass KnownClass;
4124 if (SimplifyDemandedFPClass(&RI, 0, ~ReturnClass, KnownClass))
4125 return &RI;
4126
4127 return nullptr;
4128}
4129
4130// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
4132 // Try to remove the previous instruction if it must lead to unreachable.
4133 // This includes instructions like stores and "llvm.assume" that may not get
4134 // removed by simple dead code elimination.
4135 bool Changed = false;
4136 while (Instruction *Prev = I.getPrevNode()) {
4137 // While we theoretically can erase EH, that would result in a block that
4138 // used to start with an EH pad no longer starting with one, which is invalid.
4139 // To make it valid, we'd need to fix up predecessors to no longer refer to
4140 // this block, but that changes the CFG, which is not allowed in InstCombine.
4141 if (Prev->isEHPad())
4142 break; // Can not drop any more instructions. We're done here.
4143
4145 break; // Can not drop any more instructions. We're done here.
4146 // Otherwise, this instruction can be freely erased,
4147 // even if it is not side-effect free.
4148
4149 // A value may still have uses before we process it here (for example, in
4150 // another unreachable block), so convert those to poison.
4151 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
4152 eraseInstFromFunction(*Prev);
4153 Changed = true;
4154 }
4155 return Changed;
4156}
4157
4162
4164 assert(BI.isUnconditional() && "Only for unconditional branches.");
4165
4166 // If this store is the second-to-last instruction in the basic block
4167 // (excluding debug info) and if the block ends with
4168 // an unconditional branch, try to move the store to the successor block.
4169
4170 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
4171 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
4172 do {
4173 if (BBI != FirstInstr)
4174 --BBI;
4175 } while (BBI != FirstInstr && BBI->isDebugOrPseudoInst());
4176
4177 return dyn_cast<StoreInst>(BBI);
4178 };
4179
4180 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
4182 return &BI;
4183
4184 return nullptr;
4185}
4186
4189 if (!DeadEdges.insert({From, To}).second)
4190 return;
4191
4192 // Replace phi node operands in successor with poison.
4193 for (PHINode &PN : To->phis())
4194 for (Use &U : PN.incoming_values())
4195 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
4196 replaceUse(U, PoisonValue::get(PN.getType()));
4197 addToWorklist(&PN);
4198 MadeIRChange = true;
4199 }
4200
4201 Worklist.push_back(To);
4202}
4203
4204// Under the assumption that I is unreachable, remove it and following
4205// instructions. Changes are reported directly to MadeIRChange.
4208 BasicBlock *BB = I->getParent();
4209 for (Instruction &Inst : make_early_inc_range(
4210 make_range(std::next(BB->getTerminator()->getReverseIterator()),
4211 std::next(I->getReverseIterator())))) {
4212 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
4213 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
4214 MadeIRChange = true;
4215 }
4216 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
4217 continue;
4218 // RemoveDIs: erase debug-info on this instruction manually.
4219 Inst.dropDbgRecords();
4221 MadeIRChange = true;
4222 }
4223
4226 MadeIRChange = true;
4227 for (Value *V : Changed)
4229 }
4230
4231 // Handle potentially dead successors.
4232 for (BasicBlock *Succ : successors(BB))
4233 addDeadEdge(BB, Succ, Worklist);
4234}
4235
4238 while (!Worklist.empty()) {
4239 BasicBlock *BB = Worklist.pop_back_val();
4240 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
4241 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
4242 }))
4243 continue;
4244
4246 }
4247}
4248
4250 BasicBlock *LiveSucc) {
4252 for (BasicBlock *Succ : successors(BB)) {
4253 // The live successor isn't dead.
4254 if (Succ == LiveSucc)
4255 continue;
4256
4257 addDeadEdge(BB, Succ, Worklist);
4258 }
4259
4261}
4262
4264 if (BI.isUnconditional())
4266
4267 // Change br (not X), label True, label False to: br X, label False, True
4268 Value *Cond = BI.getCondition();
4269 Value *X;
4270 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
4271 // Swap Destinations and condition...
4272 BI.swapSuccessors();
4273 if (BPI)
4274 BPI->swapSuccEdgesProbabilities(BI.getParent());
4275 return replaceOperand(BI, 0, X);
4276 }
4277
4278 // Canonicalize logical-and-with-invert as logical-or-with-invert.
4279 // This is done by inverting the condition and swapping successors:
4280 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
4281 Value *Y;
4282 if (isa<SelectInst>(Cond) &&
4283 match(Cond,
4285 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
4286 Value *Or = Builder.CreateLogicalOr(NotX, Y);
4287
4288 // Set weights for the new OR select instruction too.
4290 if (auto *OrInst = dyn_cast<Instruction>(Or)) {
4291 if (auto *CondInst = dyn_cast<Instruction>(Cond)) {
4292 SmallVector<uint32_t> Weights;
4293 if (extractBranchWeights(*CondInst, Weights)) {
4294 assert(Weights.size() == 2 &&
4295 "Unexpected number of branch weights!");
4296 std::swap(Weights[0], Weights[1]);
4297 setBranchWeights(*OrInst, Weights, /*IsExpected=*/false);
4298 }
4299 }
4300 }
4301 }
4302 BI.swapSuccessors();
4303 if (BPI)
4304 BPI->swapSuccEdgesProbabilities(BI.getParent());
4305 return replaceOperand(BI, 0, Or);
4306 }
4307
4308 // If the condition is irrelevant, remove the use so that other
4309 // transforms on the condition become more effective.
4310 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
4311 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
4312
4313 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
4314 CmpPredicate Pred;
4315 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
4316 !isCanonicalPredicate(Pred)) {
4317 // Swap destinations and condition.
4318 auto *Cmp = cast<CmpInst>(Cond);
4319 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
4320 BI.swapSuccessors();
4321 if (BPI)
4322 BPI->swapSuccEdgesProbabilities(BI.getParent());
4323 Worklist.push(Cmp);
4324 return &BI;
4325 }
4326
4327 if (isa<UndefValue>(Cond)) {
4328 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
4329 return nullptr;
4330 }
4331 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4333 BI.getSuccessor(!CI->getZExtValue()));
4334 return nullptr;
4335 }
4336
4337 // Replace all dominated uses of the condition with true/false
4338 // Ignore constant expressions to avoid iterating over uses on other
4339 // functions.
4340 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
4341 for (auto &U : make_early_inc_range(Cond->uses())) {
4342 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
4343 if (DT.dominates(Edge0, U)) {
4344 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
4345 addToWorklist(cast<Instruction>(U.getUser()));
4346 continue;
4347 }
4348 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
4349 if (DT.dominates(Edge1, U)) {
4350 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
4351 addToWorklist(cast<Instruction>(U.getUser()));
4352 }
4353 }
4354 }
4355
4356 DC.registerBranch(&BI);
4357 return nullptr;
4358}
4359
4360// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
4361// we can prove that both (switch C) and (switch X) go to the default when cond
4362// is false/true.
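// For example (illustrative):
//   %cmp = icmp ult i32 %x, 4
//   %sel = select i1 %cmp, i32 %x, i32 100
//   switch i32 %sel, label %default [ i32 0, label %a
//                                     i32 1, label %b ]
// Case 100 goes to the default, and whenever %cmp is false %x is >= 4, which
// also lands in the default, so the switch can use %x directly.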
4365 bool IsTrueArm) {
4366 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
4367 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
4368 if (!C)
4369 return nullptr;
4370
4371 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
4372 if (CstBB != SI.getDefaultDest())
4373 return nullptr;
4374 Value *X = Select->getOperand(3 - CstOpIdx);
4375 CmpPredicate Pred;
4376 const APInt *RHSC;
4377 if (!match(Select->getCondition(),
4378 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
4379 return nullptr;
4380 if (IsTrueArm)
4381 Pred = ICmpInst::getInversePredicate(Pred);
4382
4383 // See whether we can replace the select with X
4385 for (auto Case : SI.cases())
4386 if (!CR.contains(Case.getCaseValue()->getValue()))
4387 return nullptr;
4388
4389 return X;
4390}
4391
4393 Value *Cond = SI.getCondition();
4394 Value *Op0;
4395 const APInt *CondOpC;
4396 using InvertFn = std::function<APInt(const APInt &Case, const APInt &C)>;
4397
4398 auto MaybeInvertible = [&](Value *Cond) -> InvertFn {
4399 if (match(Cond, m_Add(m_Value(Op0), m_APInt(CondOpC))))
4400 // Change 'switch (X+C) case Case:' into 'switch (X) case Case-C'.
4401 return [](const APInt &Case, const APInt &C) { return Case - C; };
4402
4403 if (match(Cond, m_Sub(m_APInt(CondOpC), m_Value(Op0))))
4404 // Change 'switch (C-X) case Case:' into 'switch (X) case C-Case'.
4405 return [](const APInt &Case, const APInt &C) { return C - Case; };
4406
4407 if (match(Cond, m_Xor(m_Value(Op0), m_APInt(CondOpC))) &&
4408 !CondOpC->isMinSignedValue() && !CondOpC->isMaxSignedValue())
4409 // Change 'switch (X^C) case Case:' into 'switch (X) case Case^C'.
4410 // Prevent creation of large case values by excluding extremes.
4411 return [](const APInt &Case, const APInt &C) { return Case ^ C; };
4412
4413 return nullptr;
4414 };
4415
4416 // Attempt to invert and simplify the switch condition, as long as the
4417 // condition is not used further, as it may not be profitable otherwise.
4418 if (auto InvertFn = MaybeInvertible(Cond); InvertFn && Cond->hasOneUse()) {
4419 for (auto &Case : SI.cases()) {
4420 const APInt &New = InvertFn(Case.getCaseValue()->getValue(), *CondOpC);
4421 Case.setValue(ConstantInt::get(SI.getContext(), New));
4422 }
4423 return replaceOperand(SI, 0, Op0);
4424 }
4425
4426 uint64_t ShiftAmt;
4427 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
4428 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
4429 all_of(SI.cases(), [&](const auto &Case) {
4430 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
4431 })) {
4432 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
4434 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
4435 Shl->hasOneUse()) {
4436 Value *NewCond = Op0;
4437 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
4438 // If the shift may wrap, we need to mask off the shifted bits.
4439 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
4440 NewCond = Builder.CreateAnd(
4441 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
4442 }
4443 for (auto Case : SI.cases()) {
4444 const APInt &CaseVal = Case.getCaseValue()->getValue();
4445 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
4446 : CaseVal.lshr(ShiftAmt);
4447 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
4448 }
4449 return replaceOperand(SI, 0, NewCond);
4450 }
4451 }
4452
4453 // Fold switch(zext/sext(X)) into switch(X) if possible.
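// For example (illustrative):
//   %w = zext i8 %x to i32
//   switch i32 %w, label %d [ i32 10, label %a ]
// becomes
//   switch i8 %x, label %d [ i8 10, label %a ]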
4454 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
4455 bool IsZExt = isa<ZExtInst>(Cond);
4456 Type *SrcTy = Op0->getType();
4457 unsigned NewWidth = SrcTy->getScalarSizeInBits();
4458
4459 if (all_of(SI.cases(), [&](const auto &Case) {
4460 const APInt &CaseVal = Case.getCaseValue()->getValue();
4461 return IsZExt ? CaseVal.isIntN(NewWidth)
4462 : CaseVal.isSignedIntN(NewWidth);
4463 })) {
4464 for (auto &Case : SI.cases()) {
4465 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4466 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4467 }
4468 return replaceOperand(SI, 0, Op0);
4469 }
4470 }
4471
4472 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
4473 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
4474 if (Value *V =
4475 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
4476 return replaceOperand(SI, 0, V);
4477 if (Value *V =
4478 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
4479 return replaceOperand(SI, 0, V);
4480 }
4481
4482 KnownBits Known = computeKnownBits(Cond, &SI);
4483 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
4484 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
4485
4486 // Compute the number of leading bits we can ignore.
4487 // TODO: A better way to determine this would use ComputeNumSignBits().
4488 for (const auto &C : SI.cases()) {
4489 LeadingKnownZeros =
4490 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
4491 LeadingKnownOnes =
4492 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
4493 }
4494
4495 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
4496
4497 // Shrink the condition operand if the new type is smaller than the old type.
4498 // But do not shrink to a non-standard type, because the backend can't
4499 // generate good code for that yet.
4500 // TODO: We can make it aggressive again after fixing PR39569.
4501 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
4502 shouldChangeType(Known.getBitWidth(), NewWidth)) {
4503 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
4504 Builder.SetInsertPoint(&SI);
4505 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
4506
4507 for (auto Case : SI.cases()) {
4508 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4509 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4510 }
4511 return replaceOperand(SI, 0, NewCond);
4512 }
4513
4514 if (isa<UndefValue>(Cond)) {
4515 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
4516 return nullptr;
4517 }
4518 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4520 SI.findCaseValue(CI)->getCaseSuccessor());
4521 return nullptr;
4522 }
4523
4524 return nullptr;
4525}
4526
4528InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
4530 if (!WO)
4531 return nullptr;
4532
4533 Intrinsic::ID OvID = WO->getIntrinsicID();
4534 const APInt *C = nullptr;
4535 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
4536 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
4537 OvID == Intrinsic::umul_with_overflow)) {
4538 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
4539 if (C->isAllOnes())
4540 return BinaryOperator::CreateNeg(WO->getLHS());
4541 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
4542 if (C->isPowerOf2()) {
4543 return BinaryOperator::CreateShl(
4544 WO->getLHS(),
4545 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4546 }
4547 }
4548 }
4549
4550 // We're extracting from an overflow intrinsic. See if we're the only user.
4551 // That allows us to simplify multiple result intrinsics to simpler things
4552 // that just get one value.
4553 if (!WO->hasOneUse())
4554 return nullptr;
4555
4556 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4557 // and replace it with a traditional binary instruction.
4558 if (*EV.idx_begin() == 0) {
4559 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4560 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4561 // Replace the old instruction's uses with poison.
4562 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
4564 return BinaryOperator::Create(BinOp, LHS, RHS);
4565 }
4566
4567 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4568
4569 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
4570 if (OvID == Intrinsic::usub_with_overflow)
4571 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4572
4573 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4574 // +1 is not possible because we assume signed values.
4575 if (OvID == Intrinsic::smul_with_overflow &&
4576 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4577 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4578
4579 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4580 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4581 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4582 // Only handle even bitwidths for performance reasons.
4583 if (BitWidth % 2 == 0)
4584 return new ICmpInst(
4585 ICmpInst::ICMP_UGT, WO->getLHS(),
4586 ConstantInt::get(WO->getLHS()->getType(),
4588 }
4589
4590 // If only the overflow result is used, and the right hand side is a
4591 // constant (or constant splat), we can remove the intrinsic by directly
4592 // checking for overflow.
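// For example (illustrative):
//   %wo = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 42)
//   %ov = extractvalue { i32, i1 } %wo, 1
// overflows exactly when %x is greater than (UINT32_MAX - 42), so %ov can be
// replaced by an icmp ugt against that bound.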
4593 if (C) {
4594 // Compute the no-wrap range for LHS given RHS=C, then construct an
4595 // equivalent icmp, potentially using an offset.
4596 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
4597 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4598
4599 CmpInst::Predicate Pred;
4600 APInt NewRHSC, Offset;
4601 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4602 auto *OpTy = WO->getRHS()->getType();
4603 auto *NewLHS = WO->getLHS();
4604 if (Offset != 0)
4605 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
4606 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4607 ConstantInt::get(OpTy, NewRHSC));
4608 }
4609
4610 return nullptr;
4611}
4612
4615 InstCombiner::BuilderTy &Builder) {
4616 // Helper to fold frexp of select to select of frexp.
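// For example (illustrative):
//   %sel  = select i1 %c, float 4.0, float %x
//   %fr   = call { float, i32 } @llvm.frexp.f32.i32(float %sel)
//   %mant = extractvalue { float, i32 } %fr, 0
// becomes a frexp of %x plus a select between its mantissa and the constant
// mantissa of 4.0 (which is 0.5).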
4617
4618 if (!SelectInst->hasOneUse() || !FrexpCall->hasOneUse())
4619 return nullptr;
4621 Value *TrueVal = SelectInst->getTrueValue();
4622 Value *FalseVal = SelectInst->getFalseValue();
4623
4624 const APFloat *ConstVal = nullptr;
4625 Value *VarOp = nullptr;
4626 bool ConstIsTrue = false;
4627
4628 if (match(TrueVal, m_APFloat(ConstVal))) {
4629 VarOp = FalseVal;
4630 ConstIsTrue = true;
4631 } else if (match(FalseVal, m_APFloat(ConstVal))) {
4632 VarOp = TrueVal;
4633 ConstIsTrue = false;
4634 } else {
4635 return nullptr;
4636 }
4637
4638 Builder.SetInsertPoint(&EV);
4639
4640 CallInst *NewFrexp =
4641 Builder.CreateCall(FrexpCall->getCalledFunction(), {VarOp}, "frexp");
4642 NewFrexp->copyIRFlags(FrexpCall);
4643
4644 Value *NewEV = Builder.CreateExtractValue(NewFrexp, 0, "mantissa");
4645
4646 int Exp;
4647 APFloat Mantissa = frexp(*ConstVal, Exp, APFloat::rmNearestTiesToEven);
4648
4649 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4650
4651 Value *NewSel = Builder.CreateSelectFMF(
4652 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4653 ConstIsTrue ? NewEV : ConstantMantissa, SelectInst, "select.frexp");
4654 return NewSel;
4655}
4657 Value *Agg = EV.getAggregateOperand();
4658
4659 if (!EV.hasIndices())
4660 return replaceInstUsesWith(EV, Agg);
4661
4662 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4663 SQ.getWithInstruction(&EV)))
4664 return replaceInstUsesWith(EV, V);
4665
4666 Value *Cond, *TrueVal, *FalseVal;
4668 m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal)))))) {
4669 auto *SelInst =
4670 cast<SelectInst>(cast<IntrinsicInst>(Agg)->getArgOperand(0));
4671 if (Value *Result =
4672 foldFrexpOfSelect(EV, cast<IntrinsicInst>(Agg), SelInst, Builder))
4673 return replaceInstUsesWith(EV, Result);
4674 }
4676 // We're extracting from an insertvalue instruction, compare the indices
4677 const unsigned *exti, *exte, *insi, *inse;
4678 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4679 exte = EV.idx_end(), inse = IV->idx_end();
4680 exti != exte && insi != inse;
4681 ++exti, ++insi) {
4682 if (*insi != *exti)
4683 // The insert and extract both reference distinctly different elements.
4684 // This means the extract is not influenced by the insert, and we can
4685 // replace the aggregate operand of the extract with the aggregate
4686 // operand of the insert. i.e., replace
4687 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4688 // %E = extractvalue { i32, { i32 } } %I, 0
4689 // with
4690 // %E = extractvalue { i32, { i32 } } %A, 0
4691 return ExtractValueInst::Create(IV->getAggregateOperand(),
4692 EV.getIndices());
4693 }
4694 if (exti == exte && insi == inse)
4695 // Both iterators are at the end: Index lists are identical. Replace
4696 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4697 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4698 // with "i32 42"
4699 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4700 if (exti == exte) {
4701 // The extract list is a prefix of the insert list. i.e. replace
4702 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4703 // %E = extractvalue { i32, { i32 } } %I, 1
4704 // with
4705 // %X = extractvalue { i32, { i32 } } %A, 1
4706 // %E = insertvalue { i32 } %X, i32 42, 0
4707 // by switching the order of the insert and extract (though the
4708 // insertvalue should be left in, since it may have other uses).
4709 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4710 EV.getIndices());
4711 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4712 ArrayRef(insi, inse));
4713 }
4714 if (insi == inse)
4715 // The insert list is a prefix of the extract list
4716 // We can simply remove the common indices from the extract and make it
4717 // operate on the inserted value instead of the insertvalue result.
4718 // i.e., replace
4719 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4720 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4721 // with
4722 // %E = extractvalue { i32 } { i32 42 }, 0
4723 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4724 ArrayRef(exti, exte));
4725 }
4726
4727 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4728 return R;
4729
4730 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4731 // Bail out if the aggregate contains scalable vector type
4732 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4733 STy && STy->isScalableTy())
4734 return nullptr;
4735
4736 // If the (non-volatile) load only has one use, we can rewrite this to a
4737 // load from a GEP. This reduces the size of the load. If a load is used
4738 // only by extractvalue instructions then this either must have been
4739 // optimized before, or it is a struct with padding, in which case we
4740 // don't want to do the transformation as it loses padding knowledge.
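// For example (illustrative):
//   %agg = load { i32, i32 }, ptr %p
//   %val = extractvalue { i32, i32 } %agg, 1
// becomes
//   %gep = getelementptr inbounds { i32, i32 }, ptr %p, i32 0, i32 1
//   %val = load i32, ptr %gep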
4741 if (L->isSimple() && L->hasOneUse()) {
4742 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4743 SmallVector<Value*, 4> Indices;
4744 // Prefix an i32 0 since we need the first element.
4745 Indices.push_back(Builder.getInt32(0));
4746 for (unsigned Idx : EV.indices())
4747 Indices.push_back(Builder.getInt32(Idx));
4748
4749 // We need to insert these at the location of the old load, not at that of
4750 // the extractvalue.
4751 Builder.SetInsertPoint(L);
4752 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4753 L->getPointerOperand(), Indices);
4754 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
4755 // Whatever aliasing information we had for the original load must also
4756 // hold for the smaller load, so propagate the annotations.
4757 NL->setAAMetadata(L->getAAMetadata());
4758 // Returning the load directly will cause the main loop to insert it in
4759 // the wrong spot, so use replaceInstUsesWith().
4760 return replaceInstUsesWith(EV, NL);
4761 }
4762 }
4763
4764 if (auto *PN = dyn_cast<PHINode>(Agg))
4765 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4766 return Res;
4767
4768 // Canonicalize extract (select Cond, TV, FV)
4769 // -> select cond, (extract TV), (extract FV)
4770 if (auto *SI = dyn_cast<SelectInst>(Agg))
4771 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4772 return R;
4773
4774 // We could simplify extracts from other values. Note that nested extracts may
4775 // already be simplified implicitly by the above: extract (extract (insert) )
4776 // will be translated into extract ( insert ( extract ) ) first and then just
4777 // the value inserted, if appropriate. Similarly for extracts from single-use
4778 // loads: extract (extract (load)) will be translated to extract (load (gep))
4779 // and if again single-use then via load (gep (gep)) to load (gep).
4780 // However, double extracts from e.g. function arguments or return values
4781 // aren't handled yet.
4782 return nullptr;
4783}
4784
4785/// Return 'true' if the given typeinfo will match anything.
4786static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4787 switch (Personality) {
4791 // The GCC C EH and Rust personalities only exist to support cleanups, so
4792 // it's not clear what the semantics of catch clauses are.
4793 return false;
4795 return false;
4797 // While __gnat_all_others_value will match any Ada exception, it doesn't
4798 // match foreign exceptions (or didn't, before gcc-4.7).
4799 return false;
4810 return TypeInfo->isNullValue();
4811 }
4812 llvm_unreachable("invalid enum");
4813}
4814
4815static bool shorter_filter(const Value *LHS, const Value *RHS) {
4816 return
4817 cast<ArrayType>(LHS->getType())->getNumElements()
4818 <
4819 cast<ArrayType>(RHS->getType())->getNumElements();
4820}
4821
4823 // The logic here should be correct for any real-world personality function.
4824 // However if that turns out not to be true, the offending logic can always
4825 // be conditioned on the personality function, like the catch-all logic is.
4826 EHPersonality Personality =
4827 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4828
4829 // Simplify the list of clauses, eg by removing repeated catch clauses
4830 // (these are often created by inlining).
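// Illustrative sketch: after inlining, a landingpad such as
//   %lp = landingpad { ptr, i32 } catch ptr @_ZTIi catch ptr @_ZTIi
// carries a redundant second catch clause; only the first copy is kept and the
// instruction is rebuilt.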
4831 bool MakeNewInstruction = false; // If true, recreate using the following:
4832 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4833 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4834
4835 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4836 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4837 bool isLastClause = i + 1 == e;
4838 if (LI.isCatch(i)) {
4839 // A catch clause.
4840 Constant *CatchClause = LI.getClause(i);
4841 Constant *TypeInfo = CatchClause->stripPointerCasts();
4842
4843 // If we already saw this clause, there is no point in having a second
4844 // copy of it.
4845 if (AlreadyCaught.insert(TypeInfo).second) {
4846 // This catch clause was not already seen.
4847 NewClauses.push_back(CatchClause);
4848 } else {
4849 // Repeated catch clause - drop the redundant copy.
4850 MakeNewInstruction = true;
4851 }
4852
4853 // If this is a catch-all then there is no point in keeping any following
4854 // clauses or marking the landingpad as having a cleanup.
4855 if (isCatchAll(Personality, TypeInfo)) {
4856 if (!isLastClause)
4857 MakeNewInstruction = true;
4858 CleanupFlag = false;
4859 break;
4860 }
4861 } else {
4862 // A filter clause. If any of the filter elements were already caught
4863 // then they can be dropped from the filter. It is tempting to try to
4864 // exploit the filter further by saying that any typeinfo that does not
4865 // occur in the filter can't be caught later (and thus can be dropped).
4866 // However this would be wrong, since typeinfos can match without being
4867 // equal (for example if one represents a C++ class, and the other some
4868 // class derived from it).
4869 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4870 Constant *FilterClause = LI.getClause(i);
4871 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4872 unsigned NumTypeInfos = FilterType->getNumElements();
4873
4874 // An empty filter catches everything, so there is no point in keeping any
4875 // following clauses or marking the landingpad as having a cleanup. By
4876 // dealing with this case here the following code is made a bit simpler.
4877 if (!NumTypeInfos) {
4878 NewClauses.push_back(FilterClause);
4879 if (!isLastClause)
4880 MakeNewInstruction = true;
4881 CleanupFlag = false;
4882 break;
4883 }
4884
4885 bool MakeNewFilter = false; // If true, make a new filter.
4886 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4887 if (isa<ConstantAggregateZero>(FilterClause)) {
4888 // Not an empty filter - it contains at least one null typeinfo.
4889 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4890 Constant *TypeInfo =
4891 Constant::getNullValue(FilterType->getElementType());
4892 // If this typeinfo is a catch-all then the filter can never match.
4893 if (isCatchAll(Personality, TypeInfo)) {
4894 // Throw the filter away.
4895 MakeNewInstruction = true;
4896 continue;
4897 }
4898
4899 // There is no point in having multiple copies of this typeinfo, so
4900 // discard all but the first copy if there is more than one.
4901 NewFilterElts.push_back(TypeInfo);
4902 if (NumTypeInfos > 1)
4903 MakeNewFilter = true;
4904 } else {
4905 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4906 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4907 NewFilterElts.reserve(NumTypeInfos);
4908
4909 // Remove any filter elements that were already caught or that already
4910 // occurred in the filter. While there, see if any of the elements are
4911 // catch-alls. If so, the filter can be discarded.
4912 bool SawCatchAll = false;
4913 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4914 Constant *Elt = Filter->getOperand(j);
4915 Constant *TypeInfo = Elt->stripPointerCasts();
4916 if (isCatchAll(Personality, TypeInfo)) {
4917 // This element is a catch-all. Bail out, noting this fact.
4918 SawCatchAll = true;
4919 break;
4920 }
4921
4922 // Even if we've seen a type in a catch clause, we don't want to
4923 // remove it from the filter. An unexpected type handler may be
4924 // set up for a call site which throws an exception of the same
4925 // type caught. In order for the exception thrown by the unexpected
4926 // handler to propagate correctly, the filter must be correctly
4927 // described for the call site.
4928 //
4929 // Example:
4930 //
4931 // void unexpected() { throw 1;}
4932 // void foo() throw (int) {
4933 // std::set_unexpected(unexpected);
4934 // try {
4935 // throw 2.0;
4936 // } catch (int i) {}
4937 // }
4938
4939 // There is no point in having multiple copies of the same typeinfo in
4940 // a filter, so only add it if we didn't already.
4941 if (SeenInFilter.insert(TypeInfo).second)
4942 NewFilterElts.push_back(cast<Constant>(Elt));
4943 }
4944 // A filter containing a catch-all cannot match anything by definition.
4945 if (SawCatchAll) {
4946 // Throw the filter away.
4947 MakeNewInstruction = true;
4948 continue;
4949 }
4950
4951 // If we dropped something from the filter, make a new one.
4952 if (NewFilterElts.size() < NumTypeInfos)
4953 MakeNewFilter = true;
4954 }
4955 if (MakeNewFilter) {
4956 FilterType = ArrayType::get(FilterType->getElementType(),
4957 NewFilterElts.size());
4958 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
4959 MakeNewInstruction = true;
4960 }
4961
4962 NewClauses.push_back(FilterClause);
4963
4964 // If the new filter is empty then it will catch everything so there is
4965 // no point in keeping any following clauses or marking the landingpad
4966 // as having a cleanup. The case of the original filter being empty was
4967 // already handled above.
4968 if (MakeNewFilter && !NewFilterElts.size()) {
4969 assert(MakeNewInstruction && "New filter but not a new instruction!");
4970 CleanupFlag = false;
4971 break;
4972 }
4973 }
4974 }
4975
4976 // If several filters occur in a row then reorder them so that the shortest
4977 // filters come first (those with the smallest number of elements). This is
4978 // advantageous because shorter filters are more likely to match, speeding up
4979 // unwinding, but mostly because it increases the effectiveness of the other
4980 // filter optimizations below.
4981 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
4982 unsigned j;
4983 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
4984 for (j = i; j != e; ++j)
4985 if (!isa<ArrayType>(NewClauses[j]->getType()))
4986 break;
4987
4988 // Check whether the filters are already sorted by length. We need to know
4989 // if sorting them is actually going to do anything so that we only make a
4990 // new landingpad instruction if it does.
4991 for (unsigned k = i; k + 1 < j; ++k)
4992 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
4993 // Not sorted, so sort the filters now. Doing an unstable sort would be
4994 // correct too but reordering filters pointlessly might confuse users.
4995 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
4996 shorter_filter);
4997 MakeNewInstruction = true;
4998 break;
4999 }
5000
5001 // Look for the next batch of filters.
5002 i = j + 1;
5003 }
5004
5005 // If typeinfos matched if and only if equal, then the elements of a filter L
5006 // that occurs later than a filter F could be replaced by the intersection of
5007 // the elements of F and L. In reality two typeinfos can match without being
5008 // equal (for example if one represents a C++ class, and the other some class
5009 // derived from it) so it would be wrong to perform this transform in general.
5010 // However the transform is correct and useful if F is a subset of L. In that
5011 // case L can be replaced by F, and thus removed altogether since repeating a
5012 // filter is pointless. So here we look at all pairs of filters F and L where
5013 // L follows F in the list of clauses, and remove L if every element of F is
5014 // an element of L. This can occur when inlining C++ functions with exception
5015 // specifications.
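// Illustrative sketch: a filter [ptr @_ZTIi] followed later in the clause list
// by a filter [ptr @_ZTIi, ptr @_ZTId] makes the longer filter redundant
// (F is a subset of L), so the later filter is erased.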
5016 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
5017 // Examine each filter in turn.
5018 Value *Filter = NewClauses[i];
5019 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
5020 if (!FTy)
5021 // Not a filter - skip it.
5022 continue;
5023 unsigned FElts = FTy->getNumElements();
5024 // Examine each filter following this one. Doing this backwards means that
5025 // we don't have to worry about filters disappearing under us when removed.
5026 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
5027 Value *LFilter = NewClauses[j];
5028 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
5029 if (!LTy)
5030 // Not a filter - skip it.
5031 continue;
5032 // If Filter is a subset of LFilter, i.e. every element of Filter is also
5033 // an element of LFilter, then discard LFilter.
5034 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
5035 // If Filter is empty then it is a subset of LFilter.
5036 if (!FElts) {
5037 // Discard LFilter.
5038 NewClauses.erase(J);
5039 MakeNewInstruction = true;
5040 // Move on to the next filter.
5041 continue;
5042 }
5043 unsigned LElts = LTy->getNumElements();
5044 // If Filter is longer than LFilter then it cannot be a subset of it.
5045 if (FElts > LElts)
5046 // Move on to the next filter.
5047 continue;
5048 // At this point we know that LFilter has at least one element.
5049 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
5050 // Filter is a subset of LFilter iff Filter contains only zeros (as we
5051 // already know that Filter is not longer than LFilter).
5052 if (isa<ConstantAggregateZero>(Filter)) {
5053 assert(FElts <= LElts && "Should have handled this case earlier!");
5054 // Discard LFilter.
5055 NewClauses.erase(J);
5056 MakeNewInstruction = true;
5057 }
5058 // Move on to the next filter.
5059 continue;
5060 }
5061 ConstantArray *LArray = cast<ConstantArray>(LFilter);
5062 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
5063 // Since Filter is non-empty and contains only zeros, it is a subset of
5064 // LFilter iff LFilter contains a zero.
5065 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
5066 for (unsigned l = 0; l != LElts; ++l)
5067 if (LArray->getOperand(l)->isNullValue()) {
5068 // LFilter contains a zero - discard it.
5069 NewClauses.erase(J);
5070 MakeNewInstruction = true;
5071 break;
5072 }
5073 // Move on to the next filter.
5074 continue;
5075 }
5076 // At this point we know that both filters are ConstantArrays. Loop over
5077 // operands to see whether every element of Filter is also an element of
5078 // LFilter. Since filters tend to be short this is probably faster than
5079 // using a method that scales nicely.
5080 ConstantArray *FArray = cast<ConstantArray>(Filter);
5081 bool AllFound = true;
5082 for (unsigned f = 0; f != FElts; ++f) {
5083 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
5084 AllFound = false;
5085 for (unsigned l = 0; l != LElts; ++l) {
5086 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
5087 if (LTypeInfo == FTypeInfo) {
5088 AllFound = true;
5089 break;
5090 }
5091 }
5092 if (!AllFound)
5093 break;
5094 }
5095 if (AllFound) {
5096 // Discard LFilter.
5097 NewClauses.erase(J);
5098 MakeNewInstruction = true;
5099 }
5100 // Move on to the next filter.
5101 }
5102 }
5103
5104 // If we changed any of the clauses, replace the old landingpad instruction
5105 // with a new one.
5106 if (MakeNewInstruction) {
5107 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
5108 NewClauses.size());
5109 for (Constant *C : NewClauses)
5110 NLI->addClause(C);
5111 // A landing pad with no clauses must have the cleanup flag set. It is
5112 // theoretically possible, though highly unlikely, that we eliminated all
5113 // clauses. If so, force the cleanup flag to true.
5114 if (NewClauses.empty())
5115 CleanupFlag = true;
5116 NLI->setCleanup(CleanupFlag);
5117 return NLI;
5118 }
5119
5120 // Even if none of the clauses changed, we may nonetheless have understood
5121 // that the cleanup flag is pointless. Clear it if so.
5122 if (LI.isCleanup() != CleanupFlag) {
5123 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
5124 LI.setCleanup(CleanupFlag);
5125 return &LI;
5126 }
5127
5128 return nullptr;
5129}
5130
5131Value *
5132InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
5133 // Try to push freeze through instructions that propagate but don't produce
5134 // poison as far as possible. If an operand of freeze does not produce poison
5135 // then push the freeze through to the operands that are not guaranteed
5136 // non-poison. The actual transform is as follows.
5137 // Op1 = ... ; Op1 can be poison
5138 // Op0 = Inst(Op1, NonPoisonOps...)
5139 // ... = Freeze(Op0)
5140 // =>
5141 // Op1 = ...
5142 // Op1.fr = Freeze(Op1)
5143 // ... = Inst(Op1.fr, NonPoisonOps...)
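// Illustrative sketch (hypothetical IR): for
//   %a = add nsw i32 %x, 1
//   %f = freeze i32 %a
// the freeze is pushed onto the maybe-poison operand and the poison-generating
// flag is stripped:
//   %x.fr = freeze i32 %x
//   %a    = add i32 %x.fr, 1
// after which %f can simply be replaced by %a.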
5144
5145 auto CanPushFreeze = [](Value *V) {
5146 if (!isa<Instruction>(V) || isa<PHINode>(V))
5147 return false;
5148
5149 // We can't push the freeze through an instruction which can itself create
5150 // poison. If the only source of new poison is flags, we can simply
5151 // strip them (since we know the only use is the freeze and nothing can
5152 // benefit from them.)
5153 return !canCreateUndefOrPoison(cast<Operator>(V),
5154 /*ConsiderFlagsAndMetadata*/ false);
5155 };
5156
5157 // Pushing freezes up long instruction chains can be expensive. Instead,
5158 // we directly push the freeze all the way to the leaves. However, we leave
5159 // deduplication of freezes on the same value for freezeOtherUses().
5160 Use *OrigUse = &OrigFI.getOperandUse(0);
5163 Worklist.push_back(OrigUse);
5164 while (!Worklist.empty()) {
5165 auto *U = Worklist.pop_back_val();
5166 Value *V = U->get();
5167 if (!CanPushFreeze(V)) {
5168 // If we can't push through the original instruction, abort the transform.
5169 if (U == OrigUse)
5170 return nullptr;
5171
5172 auto *UserI = cast<Instruction>(U->getUser());
5173 Builder.SetInsertPoint(UserI);
5174 Value *Frozen = Builder.CreateFreeze(V, V->getName() + ".fr");
5175 U->set(Frozen);
5176 continue;
5177 }
5178
5179 auto *I = cast<Instruction>(V);
5180 if (!Visited.insert(I).second)
5181 continue;
5182
5183 // reverse() to emit freezes in a more natural order.
5184 for (Use &Op : reverse(I->operands())) {
5185 Value *OpV = Op.get();
5187 continue;
5188 Worklist.push_back(&Op);
5189 }
5190
5191 I->dropPoisonGeneratingAnnotations();
5192 this->Worklist.add(I);
5193 }
5194
5195 return OrigUse->get();
5196}
5197
5198Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
5199 PHINode *PN) {
5200 // Detect whether this is a recurrence with a start value and some number of
5201 // backedge values. We'll check whether we can push the freeze through the
5202 // backedge values (possibly dropping poison flags along the way) until we
5203 // reach the phi again. In that case, we can move the freeze to the start
5204 // value.
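// Illustrative sketch (hypothetical IR): for the recurrence
//   %iv      = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
//   %iv.next = add nuw i32 %iv, 1
//   %fr      = freeze i32 %iv
// the freeze is moved onto %start, the nuw flag is dropped, and %fr is
// replaced by %iv.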
5205 Use *StartU = nullptr;
5207 for (Use &U : PN->incoming_values()) {
5208 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
5209 // Add backedge value to worklist.
5210 Worklist.push_back(U.get());
5211 continue;
5212 }
5213
5214 // Don't bother handling multiple start values.
5215 if (StartU)
5216 return nullptr;
5217 StartU = &U;
5218 }
5219
5220 if (!StartU || Worklist.empty())
5221 return nullptr; // Not a recurrence.
5222
5223 Value *StartV = StartU->get();
5224 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
5225 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
5226 // We can't insert freeze if the start value is the result of the
5227 // terminator (e.g. an invoke).
5228 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
5229 return nullptr;
5230
5233 while (!Worklist.empty()) {
5234 Value *V = Worklist.pop_back_val();
5235 if (!Visited.insert(V).second)
5236 continue;
5237
5238 if (Visited.size() > 32)
5239 return nullptr; // Limit the total number of values we inspect.
5240
5241 // Assume that PN is non-poison, because it will be after the transform.
5242 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
5243 continue;
5244
5247 /*ConsiderFlagsAndMetadata*/ false))
5248 return nullptr;
5249
5250 DropFlags.push_back(I);
5251 append_range(Worklist, I->operands());
5252 }
5253
5254 for (Instruction *I : DropFlags)
5255 I->dropPoisonGeneratingAnnotations();
5256
5257 if (StartNeedsFreeze) {
5258 Builder.SetInsertPoint(StartBB->getTerminator());
5259 Value *FrozenStartV = Builder.CreateFreeze(StartV,
5260 StartV->getName() + ".fr");
5261 replaceUse(*StartU, FrozenStartV);
5262 }
5263 return replaceInstUsesWith(FI, PN);
5264}
5265
5266bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
5267 Value *Op = FI.getOperand(0);
5268
5269 if (isa<Constant>(Op) || Op->hasOneUse())
5270 return false;
5271
5272 // Move the freeze directly after the definition of its operand, so that
5273 // it dominates the maximum number of uses. Note that it may not dominate
5274 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
5275 // the normal/default destination. This is why the domination check in the
5276 // replacement below is still necessary.
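// Illustrative sketch (hypothetical IR): for
//   %x = call i32 @g()
//   call void @use(i32 %x)
//   %fr = freeze i32 %x
// the freeze is moved to just after the definition of %x and the earlier call
// is rewritten to use %fr, so every dominated use sees the same frozen value.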
5277 BasicBlock::iterator MoveBefore;
5278 if (isa<Argument>(Op)) {
5279 MoveBefore =
5281 } else {
5282 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
5283 if (!MoveBeforeOpt)
5284 return false;
5285 MoveBefore = *MoveBeforeOpt;
5286 }
5287
5288 // Re-point iterator to come after any debug-info records.
5289 MoveBefore.setHeadBit(false);
5290
5291 bool Changed = false;
5292 if (&FI != &*MoveBefore) {
5293 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
5294 Changed = true;
5295 }
5296
5297 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
5298 bool Dominates = DT.dominates(&FI, U);
5299 Changed |= Dominates;
5300 return Dominates;
5301 });
5302
5303 return Changed;
5304}
5305
5306// Check if any direct or bitcast user of this value is a shuffle instruction.
5307static bool isUsedWithinShuffleVector(Value *V) {
5308 for (auto *U : V->users()) {
5309 if (isa<ShuffleVectorInst>(U))
5310 return true;
5311 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
5312 return true;
5313 }
5314 return false;
5315}
5316
5317Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
5318 Value *Op0 = I.getOperand(0);
5319
5320 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
5321 return replaceInstUsesWith(I, V);
5322
5323 // freeze (phi const, x) --> phi const, (freeze x)
5324 if (auto *PN = dyn_cast<PHINode>(Op0)) {
5325 if (Instruction *NV = foldOpIntoPhi(I, PN))
5326 return NV;
5327 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
5328 return NV;
5329 }
5330
5331 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
5332 return replaceInstUsesWith(I, NI);
5333
5334 // If I is freeze(undef), check its uses and fold it to a fixed constant.
5335 // - or: pick -1
5336 // - select's condition: if the true value is constant, choose it by making
5337 // the condition true.
5338 // - phi: pick the common constant across operands
5339 // - default: pick 0
5340 //
5341 // Note that this transform is intentionally done here rather than
5342 // via an analysis in InstSimplify or at individual user sites. That is
5343 // because we must produce the same value for all uses of the freeze -
5344 // it's the reason "freeze" exists!
5345 //
5346 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
5347 // duplicating logic for binops at least.
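// Illustrative sketch: given
//   %f = freeze i32 undef
//   %o = or i32 %f, %x
// the only interesting user is an 'or', so %f is replaced by -1; with no such
// users it would default to 0.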
5348 auto getUndefReplacement = [&](Type *Ty) {
5349 auto pickCommonConstantFromPHI = [](PHINode &PN) -> Value * {
5350 // phi(freeze(undef), C, C). Choose C for freeze so the PHI can be
5351 // removed.
5352 Constant *BestValue = nullptr;
5353 for (Value *V : PN.incoming_values()) {
5354 if (match(V, m_Freeze(m_Undef())))
5355 continue;
5356
5357 Constant *C = dyn_cast<Constant>(V);
5358 if (!C)
5359 return nullptr;
5360
5361 if (!isGuaranteedNotToBeUndefOrPoison(C))
5362 return nullptr;
5363
5364 if (BestValue && BestValue != C)
5365 return nullptr;
5366
5367 BestValue = C;
5368 }
5369 return BestValue;
5370 };
5371
5372 Value *NullValue = Constant::getNullValue(Ty);
5373 Value *BestValue = nullptr;
5374 for (auto *U : I.users()) {
5375 Value *V = NullValue;
5376 if (match(U, m_Or(m_Value(), m_Value())))
5377 V = Constant::getAllOnesValue(Ty);
5378 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
5379 V = ConstantInt::getTrue(Ty);
5380 else if (match(U, m_c_Select(m_Specific(&I), m_Value(V)))) {
5381 if (V == &I || !isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT))
5382 V = NullValue;
5383 } else if (auto *PHI = dyn_cast<PHINode>(U)) {
5384 if (Value *MaybeV = pickCommonConstantFromPHI(*PHI))
5385 V = MaybeV;
5386 }
5387
5388 if (!BestValue)
5389 BestValue = V;
5390 else if (BestValue != V)
5391 BestValue = NullValue;
5392 }
5393 assert(BestValue && "Must have at least one use");
5394 assert(BestValue != &I && "Cannot replace with itself");
5395 return BestValue;
5396 };
5397
5398 if (match(Op0, m_Undef())) {
5399 // Don't fold freeze(undef/poison) if it's used as a vector operand in
5400 // a shuffle. This may improve codegen for shuffles that allow
5401 // unspecified inputs.
5402 if (isUsedWithinShuffleVector(&I))
5403 return nullptr;
5404 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
5405 }
5406
5407 auto getFreezeVectorReplacement = [](Constant *C) -> Constant * {
5408 Type *Ty = C->getType();
5409 auto *VTy = dyn_cast<FixedVectorType>(Ty);
5410 if (!VTy)
5411 return nullptr;
5412 unsigned NumElts = VTy->getNumElements();
5413 Constant *BestValue = Constant::getNullValue(VTy->getScalarType());
5414 for (unsigned i = 0; i != NumElts; ++i) {
5415 Constant *EltC = C->getAggregateElement(i);
5416 if (EltC && !match(EltC, m_Undef())) {
5417 BestValue = EltC;
5418 break;
5419 }
5420 }
5421 return Constant::replaceUndefsWith(C, BestValue);
5422 };
5423
5424 Constant *C;
5425 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement() &&
5426 !C->containsConstantExpression()) {
5427 if (Constant *Repl = getFreezeVectorReplacement(C))
5428 return replaceInstUsesWith(I, Repl);
5429 }
5430
5431 // Replace uses of Op with freeze(Op).
5432 if (freezeOtherUses(I))
5433 return &I;
5434
5435 return nullptr;
5436}
5437
5438/// Check for case where the call writes to an otherwise dead alloca. This
5439/// shows up for unused out-params in idiomatic C/C++ code. Note that this
5440/// helper *only* analyzes the write; doesn't check any other legality aspect.
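/// Illustrative sketch (hypothetical C, 'get_value' is a made-up callee): in
///   void caller() { int unused; get_value(&unused); }
/// the call's only write lands in the otherwise dead 'unused' alloca, which is
/// exactly the pattern this helper recognizes.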
5441static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
5442 auto *CB = dyn_cast<CallBase>(I);
5443 if (!CB)
5444 // TODO: handle e.g. store to alloca here - only worth doing if we extend
5445 // to allow reload along used path as described below. Otherwise, this
5446 // is simply a store to a dead allocation which will be removed.
5447 return false;
5448 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
5449 if (!Dest)
5450 return false;
5451 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
5452 if (!AI)
5453 // TODO: allow malloc?
5454 return false;
5455 // TODO: allow memory access dominated by move point? Note that since AI
5456 // could have a reference to itself captured by the call, we would need to
5457 // account for cycles in doing so.
5458 SmallVector<const User *> AllocaUsers;
5460 auto pushUsers = [&](const Instruction &I) {
5461 for (const User *U : I.users()) {
5462 if (Visited.insert(U).second)
5463 AllocaUsers.push_back(U);
5464 }
5465 };
5466 pushUsers(*AI);
5467 while (!AllocaUsers.empty()) {
5468 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
5469 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
5470 pushUsers(*UserI);
5471 continue;
5472 }
5473 if (UserI == CB)
5474 continue;
5475 // TODO: support lifetime.start/end here
5476 return false;
5477 }
5478 return true;
5479}
5480
5481/// Try to move the specified instruction from its current block into the
5482/// beginning of DestBlock, which can only happen if it's safe to move the
5483/// instruction past all of the instructions between it and the end of its
5484/// block.
5485bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
5486 BasicBlock *DestBlock) {
5487 BasicBlock *SrcBlock = I->getParent();
5488
5489 // Cannot move control-flow-involving, volatile loads, vaarg, etc.
5490 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
5491 I->isTerminator())
5492 return false;
5493
5494 // Do not sink static or dynamic alloca instructions. Static allocas must
5495 // remain in the entry block, and dynamic allocas must not be sunk in between
5496 // a stacksave / stackrestore pair, which would incorrectly shorten its
5497 // lifetime.
5498 if (isa<AllocaInst>(I))
5499 return false;
5500
5501 // Do not sink into catchswitch blocks.
5502 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
5503 return false;
5504
5505 // Do not sink convergent call instructions.
5506 if (auto *CI = dyn_cast<CallInst>(I)) {
5507 if (CI->isConvergent())
5508 return false;
5509 }
5510
5511 // Unless we can prove that the memory write isn't visible except on the
5512 // path we're sinking to, we must bail.
5513 if (I->mayWriteToMemory()) {
5514 if (!SoleWriteToDeadLocal(I, TLI))
5515 return false;
5516 }
5517
5518 // We can only sink load instructions if there is nothing between the load and
5519 // the end of block that could change the value.
5520 if (I->mayReadFromMemory() &&
5521 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
5522 // We don't want to do any sophisticated alias analysis, so we only check
5523 // the instructions after I in I's parent block if we try to sink to its
5524 // successor block.
5525 if (DestBlock->getUniquePredecessor() != I->getParent())
5526 return false;
5527 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
5528 E = I->getParent()->end();
5529 Scan != E; ++Scan)
5530 if (Scan->mayWriteToMemory())
5531 return false;
5532 }
5533
5534 I->dropDroppableUses([&](const Use *U) {
5535 auto *I = dyn_cast<Instruction>(U->getUser());
5536 if (I && I->getParent() != DestBlock) {
5537 Worklist.add(I);
5538 return true;
5539 }
5540 return false;
5541 });
5542 /// FIXME: We could remove droppable uses that are not dominated by
5543 /// the new position.
5544
5545 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
5546 I->moveBefore(*DestBlock, InsertPos);
5547 ++NumSunkInst;
5548
5549 // Also sink all related debug uses from the source basic block. Otherwise we
5550 // get debug use before the def. Attempt to salvage debug uses first, to
5551 // maximise the range over which variables have a location. If we cannot salvage, then
5552 // mark the location undef: we know it was supposed to receive a new location
5553 // here, but that computation has been sunk.
5554 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
5555 findDbgUsers(I, DbgVariableRecords);
5556 if (!DbgVariableRecords.empty())
5557 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
5558 DbgVariableRecords);
5559
5560 // PS: there are numerous flaws with this behaviour, not least that right now
5561 // assignments can be re-ordered past other assignments to the same variable
5562 // if they use different Values. Creating more undef assignments can never be
5563 // undone. And salvaging all users outside of this block can unnecessarily
5564 // alter the lifetime of the live-value that the variable refers to.
5565 // Some of these things can be resolved by tolerating debug use-before-defs in
5566 // LLVM-IR, however it depends on the instruction-referencing CodeGen backend
5567 // being used for more architectures.
5568
5569 return true;
5570}
5571
5572void InstCombinerImpl::tryToSinkInstructionDbgVariableRecords(
5573 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
5574 BasicBlock *DestBlock,
5575 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
5576 // For all debug values in the destination block, the sunk instruction
5577 // will still be available, so they do not need to be dropped.
5578
5579 // Fetch all DbgVariableRecords not already in the destination.
5580 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
5581 for (auto &DVR : DbgVariableRecords)
5582 if (DVR->getParent() != DestBlock)
5583 DbgVariableRecordsToSalvage.push_back(DVR);
5584
5585 // Fetch a second collection, of DbgVariableRecords in the source block that
5586 // we're going to sink.
5587 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5588 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5589 if (DVR->getParent() == SrcBlock)
5590 DbgVariableRecordsToSink.push_back(DVR);
5591
5592 // Sort DbgVariableRecords according to their position in the block. This is a
5593 // partial order: DbgVariableRecords attached to different instructions will
5594 // be ordered by the instruction order, but DbgVariableRecords attached to the
5595 // same instruction won't have an order.
5596 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5597 return B->getInstruction()->comesBefore(A->getInstruction());
5598 };
5599 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5600
5601 // If there are two assignments to the same variable attached to the same
5602 // instruction, the ordering between the two assignments is important. Scan
5603 // for this (rare) case and establish which is the last assignment.
5604 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5606 if (DbgVariableRecordsToSink.size() > 1) {
5608 // Count how many assignments to each variable there is per instruction.
5609 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5610 DebugVariable DbgUserVariable =
5611 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5612 DVR->getDebugLoc()->getInlinedAt());
5613 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5614 }
5615
5616 // If there are any instructions with two assignments, add them to the
5617 // FilterOutMap to record that they need extra filtering.
5619 for (auto It : CountMap) {
5620 if (It.second > 1) {
5621 FilterOutMap[It.first] = nullptr;
5622 DupSet.insert(It.first.first);
5623 }
5624 }
5625
5626 // For all instruction/variable pairs needing extra filtering, find the
5627 // latest assignment.
5628 for (const Instruction *Inst : DupSet) {
5629 for (DbgVariableRecord &DVR :
5630 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5631 DebugVariable DbgUserVariable =
5632 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5633 DVR.getDebugLoc()->getInlinedAt());
5634 auto FilterIt =
5635 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5636 if (FilterIt == FilterOutMap.end())
5637 continue;
5638 if (FilterIt->second != nullptr)
5639 continue;
5640 FilterIt->second = &DVR;
5641 }
5642 }
5643 }
5644
5645 // Perform cloning of the DbgVariableRecords that we plan on sinking, filtering
5646 // out any duplicate assignments identified above.
5648 SmallSet<DebugVariable, 4> SunkVariables;
5649 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5651 continue;
5652
5653 DebugVariable DbgUserVariable =
5654 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5655 DVR->getDebugLoc()->getInlinedAt());
5656
5657 // For any variable where there were multiple assignments in the same place,
5658 // ignore all but the last assignment.
5659 if (!FilterOutMap.empty()) {
5660 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5661 auto It = FilterOutMap.find(IVP);
5662
5663 // Filter out.
5664 if (It != FilterOutMap.end() && It->second != DVR)
5665 continue;
5666 }
5667
5668 if (!SunkVariables.insert(DbgUserVariable).second)
5669 continue;
5670
5671 if (DVR->isDbgAssign())
5672 continue;
5673
5674 DVRClones.emplace_back(DVR->clone());
5675 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5676 }
5677
5678 // Perform salvaging without the clones, then sink the clones.
5679 if (DVRClones.empty())
5680 return;
5681
5682 salvageDebugInfoForDbgValues(*I, DbgVariableRecordsToSalvage);
5683
5684 // The clones are in reverse order of original appearance. Assert that the
5685 // head bit is set on the iterator as we _should_ have received it via
5686 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5687 // we'll repeatedly insert at the head, such as:
5688 // DVR-3 (third insertion goes here)
5689 // DVR-2 (second insertion goes here)
5690 // DVR-1 (first insertion goes here)
5691 // Any-Prior-DVRs
5692 // InsertPtInst
5693 assert(InsertPos.getHeadBit());
5694 for (DbgVariableRecord *DVRClone : DVRClones) {
5695 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5696 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5697 }
5698}
5699
5700bool InstCombinerImpl::run() {
5701 while (!Worklist.isEmpty()) {
5702 // Walk deferred instructions in reverse order, and push them to the
5703 // worklist, which means they'll end up popped from the worklist in-order.
5704 while (Instruction *I = Worklist.popDeferred()) {
5705 // Check to see if we can DCE the instruction. We do this already here to
5706 // reduce the number of uses and thus allow other folds to trigger.
5707 // Note that eraseInstFromFunction() may push additional instructions on
5708 // the deferred worklist, so this will DCE whole instruction chains.
5711 ++NumDeadInst;
5712 continue;
5713 }
5714
5715 Worklist.push(I);
5716 }
5717
5718 Instruction *I = Worklist.removeOne();
5719 if (I == nullptr) continue; // skip null values.
5720
5721 // Check to see if we can DCE the instruction.
5724 ++NumDeadInst;
5725 continue;
5726 }
5727
5728 if (!DebugCounter::shouldExecute(VisitCounter))
5729 continue;
5730
5731 // See if we can trivially sink this instruction to its user if we can
5732 // prove that the successor is not executed more frequently than our block.
5733 // Return the UserBlock if successful.
5734 auto getOptionalSinkBlockForInst =
5735 [this](Instruction *I) -> std::optional<BasicBlock *> {
5736 if (!EnableCodeSinking)
5737 return std::nullopt;
5738
5739 BasicBlock *BB = I->getParent();
5740 BasicBlock *UserParent = nullptr;
5741 unsigned NumUsers = 0;
5742
5743 for (Use &U : I->uses()) {
5744 User *User = U.getUser();
5745 if (User->isDroppable()) {
5746 // Do not sink if there are dereferenceable assumes that would be
5747 // removed.
5749 if (II->getIntrinsicID() != Intrinsic::assume ||
5750 !II->getOperandBundle("dereferenceable"))
5751 continue;
5752 }
5753
5754 if (NumUsers > MaxSinkNumUsers)
5755 return std::nullopt;
5756
5757 Instruction *UserInst = cast<Instruction>(User);
5758 // Special handling for Phi nodes - get the block the use occurs in.
5759 BasicBlock *UserBB = UserInst->getParent();
5760 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5761 UserBB = PN->getIncomingBlock(U);
5762 // Bail out if we have uses in different blocks. We don't do any
5763 // sophisticated analysis (i.e. finding NearestCommonDominator of these
5764 // use blocks).
5765 if (UserParent && UserParent != UserBB)
5766 return std::nullopt;
5767 UserParent = UserBB;
5768
5769 // Make sure these checks are done only once; naturally, we do them the
5770 // first time we get the UserParent, which saves compile time.
5771 if (NumUsers == 0) {
5772 // Try sinking to another block. If that block is unreachable, then do
5773 // not bother. SimplifyCFG should handle it.
5774 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5775 return std::nullopt;
5776
5777 auto *Term = UserParent->getTerminator();
5778 // See if the user is one of our successors that has only one
5779 // predecessor, so that we don't have to split the critical edge.
5780 // Another option where we can sink is a block that ends with a
5781 // terminator that does not pass control to other block (such as
5782 // return or unreachable or resume). In this case:
5783 // - I dominates the User (by SSA form);
5784 // - the User will be executed at most once.
5785 // So sinking I down to User is always profitable or neutral.
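// Illustrative sketch: a value computed in the entry block whose only
// user sits in a block that ends in 'unreachable' (e.g. an error path)
// can be sunk into that block, since the block executes at most once.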
5786 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5787 return std::nullopt;
5788
5789 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5790 }
5791
5792 NumUsers++;
5793 }
5794
5795 // No user or only has droppable users.
5796 if (!UserParent)
5797 return std::nullopt;
5798
5799 return UserParent;
5800 };
5801
5802 auto OptBB = getOptionalSinkBlockForInst(I);
5803 if (OptBB) {
5804 auto *UserParent = *OptBB;
5805 // Okay, the CFG is simple enough, try to sink this instruction.
5806 if (tryToSinkInstruction(I, UserParent)) {
5807 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5808 MadeIRChange = true;
5809 // We'll add uses of the sunk instruction below, but since
5810 // sinking can expose opportunities for its *operands*, add
5811 // them to the worklist
5812 for (Use &U : I->operands())
5813 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5814 Worklist.push(OpI);
5815 }
5816 }
5817
5818 // Now that we have an instruction, try combining it to simplify it.
5819 Builder.SetInsertPoint(I);
5820 Builder.CollectMetadataToCopy(
5821 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5822
5823#ifndef NDEBUG
5824 std::string OrigI;
5825#endif
5826 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5827 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5828
5829 if (Instruction *Result = visit(*I)) {
5830 ++NumCombined;
5831 // Should we replace the old instruction with a new one?
5832 if (Result != I) {
5833 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5834 << " New = " << *Result << '\n');
5835
5836 // We copy the old instruction's DebugLoc to the new instruction, unless
5837 // InstCombine already assigned a DebugLoc to it, in which case we
5838 // should trust the more specifically selected DebugLoc.
5839 Result->setDebugLoc(Result->getDebugLoc().orElse(I->getDebugLoc()));
5840 // We also copy annotation metadata to the new instruction.
5841 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5842 // Everything uses the new instruction now.
5843 I->replaceAllUsesWith(Result);
5844
5845 // Move the name to the new instruction first.
5846 Result->takeName(I);
5847
5848 // Insert the new instruction into the basic block...
5849 BasicBlock *InstParent = I->getParent();
5850 BasicBlock::iterator InsertPos = I->getIterator();
5851
5852 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5853 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5854 // We need to fix up the insertion point.
5855 if (isa<PHINode>(I)) // PHI -> Non-PHI
5856 InsertPos = InstParent->getFirstInsertionPt();
5857 else // Non-PHI -> PHI
5858 InsertPos = InstParent->getFirstNonPHIIt();
5859 }
5860
5861 Result->insertInto(InstParent, InsertPos);
5862
5863 // Push the new instruction and any users onto the worklist.
5864 Worklist.pushUsersToWorkList(*Result);
5865 Worklist.push(Result);
5866
5868 } else {
5869 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5870 << " New = " << *I << '\n');
5871
5872 // If the instruction was modified, it's possible that it is now dead.
5873 // if so, remove it.
5876 } else {
5877 Worklist.pushUsersToWorkList(*I);
5878 Worklist.push(I);
5879 }
5880 }
5881 MadeIRChange = true;
5882 }
5883 }
5884
5885 Worklist.zap();
5886 return MadeIRChange;
5887}
5888
5889// Track the scopes used by !alias.scope and !noalias. In a function, a
5890// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5891// by both sets. If not, the declaration of the scope can be safely omitted.
5892// The MDNode of the scope can be omitted as well for the instructions that are
5893// part of this function. We do not do that at this point, as this might become
5894// too time consuming to do.
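// Illustrative sketch: an @llvm.experimental.noalias.scope.decl for scope !3
// can be dropped unless !3 appears both in some !alias.scope list and in some
// !noalias list within the function.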
5895class AliasScopeTracker {
5896 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5897 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5898
5899public:
5900 void analyse(Instruction *I) {
5901 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5902 if (!I->hasMetadataOtherThanDebugLoc())
5903 return;
5904
5905 auto Track = [](Metadata *ScopeList, auto &Container) {
5906 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5907 if (!MDScopeList || !Container.insert(MDScopeList).second)
5908 return;
5909 for (const auto &MDOperand : MDScopeList->operands())
5910 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5911 Container.insert(MDScope);
5912 };
5913
5914 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5915 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5916 }
5917
5918 bool isNoAliasScopeDeclDead(Instruction *Inst) {
5919 const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
5920 if (!Decl)
5921 return false;
5922
5923 assert(Decl->use_empty() &&
5924 "llvm.experimental.noalias.scope.decl in use ?");
5925 const MDNode *MDSL = Decl->getScopeList();
5926 assert(MDSL->getNumOperands() == 1 &&
5927 "llvm.experimental.noalias.scope should refer to a single scope");
5928 auto &MDOperand = MDSL->getOperand(0);
5929 if (auto *MD = dyn_cast<MDNode>(MDOperand))
5930 return !UsedAliasScopesAndLists.contains(MD) ||
5931 !UsedNoAliasScopesAndLists.contains(MD);
5932
5933 // Not an MDNode ? throw away.
5934 return true;
5935 }
5936};
5937
5938/// Populate the IC worklist from a function, by walking it in reverse
5939/// post-order and adding all reachable code to the worklist.
5940///
5941/// This has a couple of tricks to make the code faster and more powerful. In
5942/// particular, we constant fold and DCE instructions as we go, to avoid adding
5943/// them to the worklist (this significantly speeds up instcombine on code where
5944/// many instructions are dead or constant). Additionally, if we find a branch
5945/// whose condition is a known constant, we only visit the reachable successors.
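/// Illustrative sketch: for 'br i1 true, label %live, label %dead', only %live
/// is queued; the edge to %dead is recorded as dead and %dead's phi inputs from
/// this block are replaced with poison.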
5946bool InstCombinerImpl::prepareWorklist(Function &F) {
5947 bool MadeIRChange = false;
5949 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
5950 DenseMap<Constant *, Constant *> FoldedConstants;
5951 AliasScopeTracker SeenAliasScopes;
5952
5953 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
5954 for (BasicBlock *Succ : successors(BB))
5955 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
5956 for (PHINode &PN : Succ->phis())
5957 for (Use &U : PN.incoming_values())
5958 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5959 U.set(PoisonValue::get(PN.getType()));
5960 MadeIRChange = true;
5961 }
5962 };
5963
5964 for (BasicBlock *BB : RPOT) {
5965 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
5966 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
5967 })) {
5968 HandleOnlyLiveSuccessor(BB, nullptr);
5969 continue;
5970 }
5971 LiveBlocks.insert(BB);
5972
5973 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
5974 // ConstantProp instruction if trivially constant.
5975 if (!Inst.use_empty() &&
5976 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5977 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
5978 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
5979 << '\n');
5980 Inst.replaceAllUsesWith(C);
5981 ++NumConstProp;
5982 if (isInstructionTriviallyDead(&Inst, &TLI))
5983 Inst.eraseFromParent();
5984 MadeIRChange = true;
5985 continue;
5986 }
5987
5988 // See if we can constant fold its operands.
5989 for (Use &U : Inst.operands()) {
5991 continue;
5992
5993 auto *C = cast<Constant>(U);
5994 Constant *&FoldRes = FoldedConstants[C];
5995 if (!FoldRes)
5996 FoldRes = ConstantFoldConstant(C, DL, &TLI);
5997
5998 if (FoldRes != C) {
5999 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
6000 << "\n Old = " << *C
6001 << "\n New = " << *FoldRes << '\n');
6002 U = FoldRes;
6003 MadeIRChange = true;
6004 }
6005 }
6006
6007 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
6008 // these call instructions consumes non-trivial amount of time and
6009 // provides no value for the optimization.
6010 if (!Inst.isDebugOrPseudoInst()) {
6011 InstrsForInstructionWorklist.push_back(&Inst);
6012 SeenAliasScopes.analyse(&Inst);
6013 }
6014 }
6015
6016 // If this is a branch or switch on a constant, mark only the single
6017 // live successor. Otherwise assume all successors are live.
6018 Instruction *TI = BB->getTerminator();
6019 if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
6020 if (isa<UndefValue>(BI->getCondition())) {
6021 // Branch on undef is UB.
6022 HandleOnlyLiveSuccessor(BB, nullptr);
6023 continue;
6024 }
6025 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
6026 bool CondVal = Cond->getZExtValue();
6027 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
6028 continue;
6029 }
6030 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
6031 if (isa<UndefValue>(SI->getCondition())) {
6032 // Switch on undef is UB.
6033 HandleOnlyLiveSuccessor(BB, nullptr);
6034 continue;
6035 }
6036 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
6037 HandleOnlyLiveSuccessor(BB,
6038 SI->findCaseValue(Cond)->getCaseSuccessor());
6039 continue;
6040 }
6041 }
6042 }
6043
6044 // Remove instructions inside unreachable blocks. This prevents the
6045 // instcombine code from having to deal with some bad special cases, and
6046 // reduces use counts of instructions.
6047 for (BasicBlock &BB : F) {
6048 if (LiveBlocks.count(&BB))
6049 continue;
6050
6051 unsigned NumDeadInstInBB;
6052 NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB);
6053
6054 MadeIRChange |= NumDeadInstInBB != 0;
6055 NumDeadInst += NumDeadInstInBB;
6056 }
6057
6058 // Once we've found all of the instructions to add to instcombine's worklist,
6059 // add them in reverse order. This way instcombine will visit from the top
6060 // of the function down. This jives well with the way that it adds all uses
6061 // of instructions to the worklist after doing a transformation, thus avoiding
6062 // some N^2 behavior in pathological cases.
6063 Worklist.reserve(InstrsForInstructionWorklist.size());
6064 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
6065 // DCE instruction if trivially dead. As we iterate in reverse program
6066 // order here, we will clean up whole chains of dead instructions.
6067 if (isInstructionTriviallyDead(Inst, &TLI) ||
6068 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
6069 ++NumDeadInst;
6070 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
6071 salvageDebugInfo(*Inst);
6072 Inst->eraseFromParent();
6073 MadeIRChange = true;
6074 continue;
6075 }
6076
6077 Worklist.push(Inst);
6078 }
6079
6080 return MadeIRChange;
6081}
6082
6083void InstCombinerImpl::computeBackEdges() {
6084 // Collect backedges.
6086 for (BasicBlock *BB : RPOT) {
6087 Visited.insert(BB);
6088 for (BasicBlock *Succ : successors(BB))
6089 if (Visited.contains(Succ))
6090 BackEdges.insert({BB, Succ});
6091 }
6092 ComputedBackEdges = true;
6093}
6094
6095static bool combineInstructionsOverFunction(
6096 Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
6097 AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
6098 DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
6099 BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI,
6100 const InstCombineOptions &Opts) {
6101 auto &DL = F.getDataLayout();
6102 bool VerifyFixpoint = Opts.VerifyFixpoint &&
6103 !F.hasFnAttribute("instcombine-no-verify-fixpoint");
6104
6105 /// Builder - This is an IRBuilder that automatically inserts new
6106 /// instructions into the worklist when they are created.
6107 IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
6108 F.getContext(), TargetFolder(DL),
6109 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
6110 Worklist.add(I);
6111 if (auto *Assume = dyn_cast<AssumeInst>(I))
6112 AC.registerAssumption(Assume);
6113 }));
6114
6116
6117 // Lower dbg.declare intrinsics otherwise their value may be clobbered
6118 // by instcombiner.
6119 bool MadeIRChange = false;
6121 MadeIRChange = LowerDbgDeclare(F);
6122
6123 // Iterate while there is work to do.
6124 unsigned Iteration = 0;
6125 while (true) {
6126 if (Iteration >= Opts.MaxIterations && !VerifyFixpoint) {
6127 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
6128 << " on " << F.getName()
6129 << " reached; stopping without verifying fixpoint\n");
6130 break;
6131 }
6132
6133 ++Iteration;
6134 ++NumWorklistIterations;
6135 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
6136 << F.getName() << "\n");
6137
6138 InstCombinerImpl IC(Worklist, Builder, F, AA, AC, TLI, TTI, DT, ORE, BFI,
6139 BPI, PSI, DL, RPOT);
6141 bool MadeChangeInThisIteration = IC.prepareWorklist(F);
6142 MadeChangeInThisIteration |= IC.run();
6143 if (!MadeChangeInThisIteration)
6144 break;
6145
6146 MadeIRChange = true;
6147 if (Iteration > Opts.MaxIterations) {
6149 "Instruction Combining on " + Twine(F.getName()) +
6150 " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
6151 " iterations. " +
6152 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
6153 "'instcombine-no-verify-fixpoint' to suppress this error.");
6154 }
6155 }
6156
6157 if (Iteration == 1)
6158 ++NumOneIteration;
6159 else if (Iteration == 2)
6160 ++NumTwoIterations;
6161 else if (Iteration == 3)
6162 ++NumThreeIterations;
6163 else
6164 ++NumFourOrMoreIterations;
6165
6166 return MadeIRChange;
6167}
6168
6170
6171void InstCombinePass::printPipeline(
6172 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
6173 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
6174 OS, MapClassName2PassName);
6175 OS << '<';
6176 OS << "max-iterations=" << Options.MaxIterations << ";";
6177 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
6178 OS << '>';
6179}
6180
6181char InstCombinePass::ID = 0;
6182
6183PreservedAnalyses InstCombinePass::run(Function &F,
6184 FunctionAnalysisManager &AM) {
6185 auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
6186 // No changes since last InstCombine pass, exit early.
6187 if (LRT.shouldSkip(&ID))
6188 return PreservedAnalyses::all();
6189
6190 auto &AC = AM.getResult<AssumptionAnalysis>(F);
6191 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
6192 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
6194 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
6195
6196 auto *AA = &AM.getResult<AAManager>(F);
6197 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
6198 ProfileSummaryInfo *PSI =
6199 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
6200 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
6201 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
6203
6204 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6205 BFI, BPI, PSI, Options)) {
6206 // No changes, all analyses are preserved.
6207 LRT.update(&ID, /*Changed=*/false);
6208 return PreservedAnalyses::all();
6209 }
6210
6211 // Mark all the analyses that instcombine updates as preserved.
6213 LRT.update(&ID, /*Changed=*/true);
6216 return PA;
6217}
6218
6234
6235bool InstructionCombiningPass::runOnFunction(Function &F) {
6236 if (skipFunction(F))
6237 return false;
6238
6239 // Required analyses.
6240 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
6241 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
6242 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
6244 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
6246
6247 // Optional analyses.
6248 ProfileSummaryInfo *PSI =
6250 BlockFrequencyInfo *BFI =
6251 (PSI && PSI->hasProfileSummary()) ?
6253 nullptr;
6254 BranchProbabilityInfo *BPI = nullptr;
6255 if (auto *WrapperPass =
6257 BPI = &WrapperPass->getBPI();
6258
6259 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6260 BFI, BPI, PSI, InstCombineOptions());
6261}
6262
6264
6268
6270 "Combine redundant instructions", false, false)
6281 "Combine redundant instructions", false, false)
6282
6283// Initialization Routines
6287
Definition APInt.cpp:1939
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:828
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1971
bool isMaxSignedValue() const
Determine if this is the largest signed value.
Definition APInt.h:406
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:335
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1952
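A hedged usage sketch of the overflow-reporting APInt helpers above; the helper name, bit width, and values are arbitrary.

#include "llvm/ADT/APInt.h"
using namespace llvm;

bool signedAddOverflows(const APInt &A, const APInt &B) {
  bool Overflow = false;
  (void)A.sadd_ov(B, Overflow); // the wrapped result is still returned
  return Overflow;
}
// e.g. signedAddOverflows(APInt(8, 100), APInt(8, 100)) is true: 200 > INT8_MAX.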
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:852
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff it does not:
Definition Pass.cpp:270
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition ArrayRef.h:219
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
uint64_t getNumElements() const
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:103
LLVM_ABI uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:259
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:539
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:493
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
size_t size() const
Definition BasicBlock.h:491
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
Definition BasicBlock.h:233
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition InstrTypes.h:294
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, OGT -> ULE, OLT -> UGE,...
Definition InstrTypes.h:789
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
ConstantArray - Constant Array Declarations.
Definition Constants.h:438
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition Constants.h:781
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
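A hedged usage sketch of the ConstantRange helpers above: build the exact set of values satisfying an icmp and test membership. The function name and the 32-bit width are illustrative.

#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

bool isSmallUnsignedIndex(const APInt &X) {
  // Exact range of all 32-bit values V with "icmp ult V, 8", i.e. [0, 8).
  ConstantRange R =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(32, 8));
  return R.contains(X);
}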
Constant Vector Declarations.
Definition Constants.h:522
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
Definition Constant.h:222
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
static bool shouldExecute(CounterInfo &Counter)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:321
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
idx_iterator idx_begin() const
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
FunctionPass(char &pid)
Definition Pass.h:316
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition Pass.cpp:188
const BasicBlock & getEntryBlock() const
Definition Function.h:813
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
static GEPNoWrapFlags all()
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForReassociate(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep (gep, p, y), x).
bool hasNoUnsignedWrap() const
bool isInBounds() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
GEPNoWrapFlags getNoWrapFlags() const
Definition Operator.h:425
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2026
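A hedged sketch of emitting the canonical i8 ptradd form that shouldCanonicalizeGEPToPtrAdd (above) refers to; the helper name is hypothetical, and Builder/Ptr/Offset are assumed to be supplied by the caller.

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Roughly the IR:  %p = getelementptr inbounds i8, ptr %Ptr, i64 %Offset
Value *emitBytePtrAdd(IRBuilderBase &Builder, Value *Ptr, Value *Offset) {
  return Builder.CreatePtrAdd(Ptr, Offset, "byte.ptradd",
                              GEPNoWrapFlags::inBounds());
}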
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition IRBuilder.h:538
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
Definition IRBuilder.h:75
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2776
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI InstCombinePass(InstCombineOptions Opts={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
Instruction * foldBinOpSelectBinOp(BinaryOperator &Op)
In some cases it is beneficial to fold a select into a binary operator.
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * foldBinopWithRecurrence(BinaryOperator &BO)
Try to fold binary operators whose operands are simple interleaved recurrences to a single recurrence...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; }...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
bool SimplifyDemandedFPClass(Instruction *I, unsigned Op, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth=0)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the CFG.
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC that has the property shuffle(NewC, ShMask) = C. Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
SimplifyQuery SQ
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
TargetLibraryInfo & TLI
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
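A hedged sketch (not the pass's actual code) of the shape a typical fold takes with this combiner-aware RAUW: match a pattern, then redirect every use of the instruction to the simplified value. The fold shown, add X, 0 -> X, and the function name are purely illustrative.

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
using namespace llvm;
using namespace llvm::PatternMatch;

Instruction *foldAddOfZero(InstCombiner &IC, BinaryOperator &I) {
  Value *X;
  if (match(&I, m_Add(m_Value(X), m_Zero())))
    return IC.replaceInstUsesWith(I, X); // all users of I now use X
  return nullptr;                        // no transform applied
}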
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
const DataLayout & DL
DomConditionCache DC
const bool MinimizeSize
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
AssumptionCache & AC
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infinite combine loops.
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
DominatorTree & DT
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
BuilderTy & Builder
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition InstCombine.h:68
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do their job.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void add(Instruction *I)
Add instruction to the worklist.
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
bool isShift() const
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
bool isIntDivRem() const
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
A wrapper class for inspecting calls to intrinsic functions.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
Tracking metadata reference owned by Metadata.
Definition Metadata.h:902
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition Metadata.h:64
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition Operator.h:111
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition Operator.h:105
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined behavior...
Definition Constants.h:1478
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition Registry.h:53
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
const Value * getTrueValue() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition SmallPtrSet.h:99
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:133
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:183
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:294
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:106
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Use * op_iterator
Definition User.h:254
op_range operands()
Definition User.h:267
op_iterator op_begin()
Definition User.h:259
const Use & getOperandUse(unsigned i) const
Definition User.h:220
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition User.cpp:119
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
op_iterator op_end()
Definition User.h:261
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition Value.h:759
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
iterator_range< user_iterator > users()
Definition Value.h:426
bool hasUseList() const
Check if this Value has a use-list.
Definition Value.h:344
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition Value.cpp:150
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:708
bool use_empty() const
Definition Value.h:346
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition Value.cpp:888
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Value handle that is nullable, but tries to track the Value.
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
reverse_self_iterator getReverseIterator()
Definition ilist_node.h:126
self_iterator getIterator()
Definition ilist_node.h:123
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
Definition Attributor.h:165
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
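A hedged sketch of how match() combines the m_* helpers listed here; the function name and the particular pattern are illustrative.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// True if I has the form "add (shl X, C), Y"; X, Y and the constant C are
// bound to the matched operands on success.
bool matchAddOfShlByConst(Instruction &I, Value *&X, Value *&Y,
                          const APInt *&C) {
  return match(&I, m_Add(m_Shl(m_Value(X), m_APInt(C)), m_Value(Y)));
}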
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
Splat_match< T > m_ConstantSplat(const T &SubPattern)
Match a constant splat. TODO: Extend this to non-constant splats.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_VectorInsert(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
initializer< Ty > init(const Ty &Val)
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:829
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
void stable_sort(R &&Range)
Definition STLExtras.h:2106
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &)
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructions.
Definition Local.cpp:2498
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool succ_empty(const Instruction *I)
Definition CFG.h:257
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI FunctionPass * createInstructionCombiningPass()
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2544
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
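A hedged sketch of the dyn_cast<> idiom used throughout this file; the helper and its purpose are illustrative.

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// dyn_cast returns nullptr when V is not a LoadInst, so it doubles as a test.
bool isVolatileLoad(const Value *V) {
  if (const auto *LI = dyn_cast<LoadInst>(V))
    return LI->isVolatile();
  return false;
}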
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1731
auto successors(const MachineBasicBlock *BB)
LLVM_ABI Constant * ConstantFoldInstruction(const Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2198
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Definition APFloat.h:1618
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition Local.cpp:2481
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected, bool ElideAllZero=false)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
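A hedged usage sketch of matchSimpleRecurrence; the wrapper name is made up, and the out-parameters receive the recurrence's binary op, start value, and step on success.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// True if PN looks like "iv = phi [Start, preheader], [iv op Step, latch]".
bool isSimpleRecurrencePhi(const PHINode *PN) {
  BinaryOperator *BO = nullptr;
  Value *Start = nullptr, *Step = nullptr;
  return matchSimpleRecurrence(PN, BO, Start, Step);
}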
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:147
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:402
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from the base pointer (without adding in the base pointer).
Definition Local.cpp:22
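A minimal sketch; the helper name is illustrative, and the include path reflects my assumption that the declaration lives in llvm/Analysis/Utils/Local.h:
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

// Materialize the byte offset computed by GEP as explicit IR arithmetic,
// inserted at the builder's current position.
static llvm::Value *materializeGEPOffset(llvm::GetElementPtrInst *GEP,
                                         llvm::IRBuilder<> &B,
                                         const llvm::DataLayout &DL) {
  return llvm::emitGEPOffset(&B, DL, GEP);
}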
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into appropriate set of dbg.value records.
Definition Local.cpp:1808
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
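dbgs() is normally paired with the LLVM_DEBUG macro; a sketch (the DEBUG_TYPE value and the function name are illustrative):
#define DEBUG_TYPE "example"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/Debug.h"

// Printed only in assertion-enabled builds when -debug or
// -debug-only=example is passed.
static void traceVisit(const llvm::Instruction &I) {
  LLVM_DEBUG(llvm::dbgs() << "Visiting: " << I << '\n');
}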
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg.declare record.
Definition Local.cpp:1675
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug users.
Definition Local.cpp:2050
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison operands.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition Local.cpp:2427
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
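A usage sketch (the helper divisorIsNonZero is hypothetical); pairing the query with getWithInstruction, shown at the end of this listing, anchors the analysis at the division's context:
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"

// True if the divisor of Div is provably non-zero at that point.
static bool divisorIsNonZero(llvm::BinaryOperator *Div,
                             const llvm::SimplifyQuery &Q) {
  return llvm::isKnownNonZero(Div->getOperand(1), Q.getWithInstruction(Div));
}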
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:323
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition ModRef.h:28
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
@ ModRef
The access may reference and may modify the value stored in memory.
Definition ModRef.h:36
@ Mod
The access may modify the value stored in memory.
Definition ModRef.h:34
@ NoModRef
The access neither references nor modifies the value stored in memory.
Definition ModRef.h:30
TargetTransformInfo TTI
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
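A sketch of the usual fold-before-build pattern (the helper buildOrFold is illustrative): ask the simplifier first and only create a new instruction when it cannot produce an existing value.
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/IRBuilder.h"

static llvm::Value *buildOrFold(llvm::IRBuilderBase &B,
                                llvm::Instruction::BinaryOps Opcode,
                                llvm::Value *L, llvm::Value *R,
                                const llvm::SimplifyQuery &Q) {
  if (llvm::Value *V = llvm::simplifyBinOp(Opcode, L, R, Q))
    return V;  // An equivalent value already exists; nothing new is created.
  return B.CreateBinOp(Opcode, L, R);
}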
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Like isSafeToSpeculativelyExecute, but without using information from the instruction's non-constant operands; intended for when those operands are about to be replaced.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one of its successors.
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined value of C.
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
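A sketch that pairs this with setBranchWeights (documented earlier in this listing) to copy !prof branch weights between instructions; the helper name is illustrative:
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/ProfDataUtils.h"

static void copyBranchWeights(const llvm::Instruction &From,
                              llvm::Instruction &To) {
  llvm::SmallVector<uint32_t, 2> Weights;
  if (const llvm::MDNode *Prof =
          From.getMetadata(llvm::LLVMContext::MD_prof))
    if (llvm::extractBranchWeights(Prof, Weights))
      llvm::setBranchWeights(To, Weights, /*IsExpected=*/false);
}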
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a range.
Definition STLExtras.h:2009
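A short sketch (the helper name is illustrative):
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Instruction.h"

// How many operands of I are compile-time constants.
static unsigned numConstantOperands(const llvm::Instruction &I) {
  return llvm::count_if(I.operands(), [](const llvm::Use &U) {
    return llvm::isa<llvm::Constant>(U.get());
  });
}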
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
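A sketch that combines this with a predecessors range (the IR-level overload of predecessors from llvm/IR/CFG.h is assumed here; the helper name is illustrative):
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/CFG.h"

// True if Pred is one of Succ's predecessor blocks.
static bool isAPredecessor(llvm::BasicBlock *Pred, llvm::BasicBlock *Succ) {
  return llvm::is_contained(llvm::predecessors(Succ), Pred);
}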
cl::opt< bool > ProfcheckDisableMetadataFixes("profcheck-disable-metadata-fixes", cl::Hidden, cl::init(false), cl::desc("Disable metadata propagation fixes discovered through Issue #147390"))
Definition Metadata.cpp:64
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition STLExtras.h:2136
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address intrinsics from the specified value, returning the original object being addressed.
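A usage sketch (the helper name is illustrative): comparing underlying objects is a cheap way to ask whether two pointers are derived from the same allocation.
#include "llvm/Analysis/ValueTracking.h"

// True if both pointers resolve to the same underlying object after
// stripping GEPs and casts.
static bool sameUnderlyingObject(const llvm::Value *A, const llvm::Value *B) {
  return llvm::getUnderlyingObject(A) == llvm::getUnderlyingObject(B);
}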
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
bool isRefSet(const ModRefInfo MRI)
Definition ModRef.h:52
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
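A sketch of how the three-way result is usually consumed (the helper name is illustrative): only a definite "true" answer is acted on, while "false" and "unknown" both count as no information.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"

static bool definitelyImpliesTrue(const llvm::Value *LHS,
                                  const llvm::Value *RHS,
                                  const llvm::DataLayout &DL) {
  return llvm::isImpliedCondition(LHS, RHS, DL).value_or(false);
}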
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition KnownBits.h:264
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:261
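A small sketch using the accessors above (the helper is illustrative and simply re-derives information KnownBits tracks): the bit width minus the minimum leading zero count bounds how many low bits can actually vary.
#include "llvm/Support/KnownBits.h"

// Conservative upper bound on the number of significant (active) bits.
static unsigned maxActiveBits(const llvm::KnownBits &Known) {
  return Known.getBitWidth() - Known.countMinLeadingZeros();
}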
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
SimplifyQuery getWithInstruction(const Instruction *I) const