// Source listing of InstCombineShifts.cpp, extracted from the LLVM 16.0.0git
// doxygen documentation pages.
1 //===- InstCombineShifts.cpp ----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitShl, visitLShr, and visitAShr functions.
10 //
11 //===----------------------------------------------------------------------===//
12 
#include "InstCombineInternal.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
18 using namespace llvm;
19 using namespace PatternMatch;
20 
21 #define DEBUG_TYPE "instcombine"
22 
24  Value *ShAmt1) {
25  // We have two shift amounts from two different shifts. The types of those
26  // shift amounts may not match. If that's the case let's bailout now..
27  if (ShAmt0->getType() != ShAmt1->getType())
28  return false;
29 
30  // As input, we have the following pattern:
31  // Sh0 (Sh1 X, Q), K
32  // We want to rewrite that as:
33  // Sh x, (Q+K) iff (Q+K) u< bitwidth(x)
34  // While we know that originally (Q+K) would not overflow
35  // (because 2 * (N-1) u<= iN -1), we have looked past extensions of
36  // shift amounts. so it may now overflow in smaller bitwidth.
37  // To ensure that does not happen, we need to ensure that the total maximal
38  // shift amount is still representable in that smaller bit width.
39  unsigned MaximalPossibleTotalShiftAmount =
40  (Sh0->getType()->getScalarSizeInBits() - 1) +
41  (Sh1->getType()->getScalarSizeInBits() - 1);
42  APInt MaximalRepresentableShiftAmount =
44  return MaximalRepresentableShiftAmount.uge(MaximalPossibleTotalShiftAmount);
45 }
46 
47 // Given pattern:
48 // (x shiftopcode Q) shiftopcode K
49 // we should rewrite it as
50 // x shiftopcode (Q+K) iff (Q+K) u< bitwidth(x) and
51 //
52 // This is valid for any shift, but they must be identical, and we must be
53 // careful in case we have (zext(Q)+zext(K)) and look past extensions,
54 // (Q+K) must not overflow or else (Q+K) u< bitwidth(x) is bogus.
55 //
56 // AnalyzeForSignBitExtraction indicates that we will only analyze whether this
57 // pattern has any 2 right-shifts that sum to 1 less than original bit width.
59  BinaryOperator *Sh0, const SimplifyQuery &SQ,
60  bool AnalyzeForSignBitExtraction) {
61  // Look for a shift of some instruction, ignore zext of shift amount if any.
62  Instruction *Sh0Op0;
63  Value *ShAmt0;
64  if (!match(Sh0,
65  m_Shift(m_Instruction(Sh0Op0), m_ZExtOrSelf(m_Value(ShAmt0)))))
66  return nullptr;
67 
68  // If there is a truncation between the two shifts, we must make note of it
69  // and look through it. The truncation imposes additional constraints on the
70  // transform.
71  Instruction *Sh1;
72  Value *Trunc = nullptr;
73  match(Sh0Op0,
75  m_Instruction(Sh1)));
76 
77  // Inner shift: (x shiftopcode ShAmt1)
78  // Like with other shift, ignore zext of shift amount if any.
79  Value *X, *ShAmt1;
80  if (!match(Sh1, m_Shift(m_Value(X), m_ZExtOrSelf(m_Value(ShAmt1)))))
81  return nullptr;
82 
83  // Verify that it would be safe to try to add those two shift amounts.
84  if (!canTryToConstantAddTwoShiftAmounts(Sh0, ShAmt0, Sh1, ShAmt1))
85  return nullptr;
86 
87  // We are only looking for signbit extraction if we have two right shifts.
88  bool HadTwoRightShifts = match(Sh0, m_Shr(m_Value(), m_Value())) &&
89  match(Sh1, m_Shr(m_Value(), m_Value()));
90  // ... and if it's not two right-shifts, we know the answer already.
91  if (AnalyzeForSignBitExtraction && !HadTwoRightShifts)
92  return nullptr;
93 
94  // The shift opcodes must be identical, unless we are just checking whether
95  // this pattern can be interpreted as a sign-bit-extraction.
96  Instruction::BinaryOps ShiftOpcode = Sh0->getOpcode();
97  bool IdenticalShOpcodes = Sh0->getOpcode() == Sh1->getOpcode();
98  if (!IdenticalShOpcodes && !AnalyzeForSignBitExtraction)
99  return nullptr;
100 
101  // If we saw truncation, we'll need to produce extra instruction,
102  // and for that one of the operands of the shift must be one-use,
103  // unless of course we don't actually plan to produce any instructions here.
104  if (Trunc && !AnalyzeForSignBitExtraction &&
105  !match(Sh0, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
106  return nullptr;
107 
108  // Can we fold (ShAmt0+ShAmt1) ?
109  auto *NewShAmt = dyn_cast_or_null<Constant>(
110  simplifyAddInst(ShAmt0, ShAmt1, /*isNSW=*/false, /*isNUW=*/false,
111  SQ.getWithInstruction(Sh0)));
112  if (!NewShAmt)
113  return nullptr; // Did not simplify.
114  unsigned NewShAmtBitWidth = NewShAmt->getType()->getScalarSizeInBits();
115  unsigned XBitWidth = X->getType()->getScalarSizeInBits();
116  // Is the new shift amount smaller than the bit width of inner/new shift?
117  if (!match(NewShAmt, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
118  APInt(NewShAmtBitWidth, XBitWidth))))
119  return nullptr; // FIXME: could perform constant-folding.
120 
121  // If there was a truncation, and we have a right-shift, we can only fold if
122  // we are left with the original sign bit. Likewise, if we were just checking
123  // that this is a sighbit extraction, this is the place to check it.
124  // FIXME: zero shift amount is also legal here, but we can't *easily* check
125  // more than one predicate so it's not really worth it.
126  if (HadTwoRightShifts && (Trunc || AnalyzeForSignBitExtraction)) {
127  // If it's not a sign bit extraction, then we're done.
128  if (!match(NewShAmt,
129  m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
130  APInt(NewShAmtBitWidth, XBitWidth - 1))))
131  return nullptr;
132  // If it is, and that was the question, return the base value.
133  if (AnalyzeForSignBitExtraction)
134  return X;
135  }
136 
137  assert(IdenticalShOpcodes && "Should not get here with different shifts.");
138 
139  // All good, we can do this fold.
140  NewShAmt = ConstantExpr::getZExtOrBitCast(NewShAmt, X->getType());
141 
142  BinaryOperator *NewShift = BinaryOperator::Create(ShiftOpcode, X, NewShAmt);
143 
144  // The flags can only be propagated if there wasn't a trunc.
145  if (!Trunc) {
146  // If the pattern did not involve trunc, and both of the original shifts
147  // had the same flag set, preserve the flag.
148  if (ShiftOpcode == Instruction::BinaryOps::Shl) {
149  NewShift->setHasNoUnsignedWrap(Sh0->hasNoUnsignedWrap() &&
150  Sh1->hasNoUnsignedWrap());
151  NewShift->setHasNoSignedWrap(Sh0->hasNoSignedWrap() &&
152  Sh1->hasNoSignedWrap());
153  } else {
154  NewShift->setIsExact(Sh0->isExact() && Sh1->isExact());
155  }
156  }
157 
158  Instruction *Ret = NewShift;
159  if (Trunc) {
160  Builder.Insert(NewShift);
161  Ret = CastInst::Create(Instruction::Trunc, NewShift, Sh0->getType());
162  }
163 
164  return Ret;
165 }
166 
167 // If we have some pattern that leaves only some low bits set, and then performs
168 // left-shift of those bits, if none of the bits that are left after the final
169 // shift are modified by the mask, we can omit the mask.
170 //
171 // There are many variants to this pattern:
172 // a) (x & ((1 << MaskShAmt) - 1)) << ShiftShAmt
173 // b) (x & (~(-1 << MaskShAmt))) << ShiftShAmt
174 // c) (x & (-1 l>> MaskShAmt)) << ShiftShAmt
175 // d) (x & ((-1 << MaskShAmt) l>> MaskShAmt)) << ShiftShAmt
176 // e) ((x << MaskShAmt) l>> MaskShAmt) << ShiftShAmt
177 // f) ((x << MaskShAmt) a>> MaskShAmt) << ShiftShAmt
178 // All these patterns can be simplified to just:
179 // x << ShiftShAmt
180 // iff:
181 // a,b) (MaskShAmt+ShiftShAmt) u>= bitwidth(x)
182 // c,d,e,f) (ShiftShAmt-MaskShAmt) s>= 0 (i.e. ShiftShAmt u>= MaskShAmt)
183 static Instruction *
185  const SimplifyQuery &Q,
187  assert(OuterShift->getOpcode() == Instruction::BinaryOps::Shl &&
188  "The input must be 'shl'!");
189 
190  Value *Masked, *ShiftShAmt;
191  match(OuterShift,
192  m_Shift(m_Value(Masked), m_ZExtOrSelf(m_Value(ShiftShAmt))));
193 
194  // *If* there is a truncation between an outer shift and a possibly-mask,
195  // then said truncation *must* be one-use, else we can't perform the fold.
196  Value *Trunc;
197  if (match(Masked, m_CombineAnd(m_Trunc(m_Value(Masked)), m_Value(Trunc))) &&
198  !Trunc->hasOneUse())
199  return nullptr;
200 
201  Type *NarrowestTy = OuterShift->getType();
202  Type *WidestTy = Masked->getType();
203  bool HadTrunc = WidestTy != NarrowestTy;
204 
205  // The mask must be computed in a type twice as wide to ensure
206  // that no bits are lost if the sum-of-shifts is wider than the base type.
207  Type *ExtendedTy = WidestTy->getExtendedType();
208 
209  Value *MaskShAmt;
210 
211  // ((1 << MaskShAmt) - 1)
212  auto MaskA = m_Add(m_Shl(m_One(), m_Value(MaskShAmt)), m_AllOnes());
213  // (~(-1 << maskNbits))
214  auto MaskB = m_Xor(m_Shl(m_AllOnes(), m_Value(MaskShAmt)), m_AllOnes());
215  // (-1 l>> MaskShAmt)
216  auto MaskC = m_LShr(m_AllOnes(), m_Value(MaskShAmt));
217  // ((-1 << MaskShAmt) l>> MaskShAmt)
218  auto MaskD =
219  m_LShr(m_Shl(m_AllOnes(), m_Value(MaskShAmt)), m_Deferred(MaskShAmt));
220 
221  Value *X;
222  Constant *NewMask;
223 
224  if (match(Masked, m_c_And(m_CombineOr(MaskA, MaskB), m_Value(X)))) {
225  // Peek through an optional zext of the shift amount.
226  match(MaskShAmt, m_ZExtOrSelf(m_Value(MaskShAmt)));
227 
228  // Verify that it would be safe to try to add those two shift amounts.
229  if (!canTryToConstantAddTwoShiftAmounts(OuterShift, ShiftShAmt, Masked,
230  MaskShAmt))
231  return nullptr;
232 
233  // Can we simplify (MaskShAmt+ShiftShAmt) ?
234  auto *SumOfShAmts = dyn_cast_or_null<Constant>(simplifyAddInst(
235  MaskShAmt, ShiftShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q));
236  if (!SumOfShAmts)
237  return nullptr; // Did not simplify.
238  // In this pattern SumOfShAmts correlates with the number of low bits
239  // that shall remain in the root value (OuterShift).
240 
241  // An extend of an undef value becomes zero because the high bits are never
242  // completely unknown. Replace the `undef` shift amounts with final
243  // shift bitwidth to ensure that the value remains undef when creating the
244  // subsequent shift op.
245  SumOfShAmts = Constant::replaceUndefsWith(
246  SumOfShAmts, ConstantInt::get(SumOfShAmts->getType()->getScalarType(),
247  ExtendedTy->getScalarSizeInBits()));
248  auto *ExtendedSumOfShAmts = ConstantExpr::getZExt(SumOfShAmts, ExtendedTy);
249  // And compute the mask as usual: ~(-1 << (SumOfShAmts))
250  auto *ExtendedAllOnes = ConstantExpr::getAllOnesValue(ExtendedTy);
251  auto *ExtendedInvertedMask =
252  ConstantExpr::getShl(ExtendedAllOnes, ExtendedSumOfShAmts);
253  NewMask = ConstantExpr::getNot(ExtendedInvertedMask);
254  } else if (match(Masked, m_c_And(m_CombineOr(MaskC, MaskD), m_Value(X))) ||
255  match(Masked, m_Shr(m_Shl(m_Value(X), m_Value(MaskShAmt)),
256  m_Deferred(MaskShAmt)))) {
257  // Peek through an optional zext of the shift amount.
258  match(MaskShAmt, m_ZExtOrSelf(m_Value(MaskShAmt)));
259 
260  // Verify that it would be safe to try to add those two shift amounts.
261  if (!canTryToConstantAddTwoShiftAmounts(OuterShift, ShiftShAmt, Masked,
262  MaskShAmt))
263  return nullptr;
264 
265  // Can we simplify (ShiftShAmt-MaskShAmt) ?
266  auto *ShAmtsDiff = dyn_cast_or_null<Constant>(simplifySubInst(
267  ShiftShAmt, MaskShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q));
268  if (!ShAmtsDiff)
269  return nullptr; // Did not simplify.
270  // In this pattern ShAmtsDiff correlates with the number of high bits that
271  // shall be unset in the root value (OuterShift).
272 
273  // An extend of an undef value becomes zero because the high bits are never
274  // completely unknown. Replace the `undef` shift amounts with negated
275  // bitwidth of innermost shift to ensure that the value remains undef when
276  // creating the subsequent shift op.
277  unsigned WidestTyBitWidth = WidestTy->getScalarSizeInBits();
278  ShAmtsDiff = Constant::replaceUndefsWith(
279  ShAmtsDiff, ConstantInt::get(ShAmtsDiff->getType()->getScalarType(),
280  -WidestTyBitWidth));
281  auto *ExtendedNumHighBitsToClear = ConstantExpr::getZExt(
282  ConstantExpr::getSub(ConstantInt::get(ShAmtsDiff->getType(),
283  WidestTyBitWidth,
284  /*isSigned=*/false),
285  ShAmtsDiff),
286  ExtendedTy);
287  // And compute the mask as usual: (-1 l>> (NumHighBitsToClear))
288  auto *ExtendedAllOnes = ConstantExpr::getAllOnesValue(ExtendedTy);
289  NewMask =
290  ConstantExpr::getLShr(ExtendedAllOnes, ExtendedNumHighBitsToClear);
291  } else
292  return nullptr; // Don't know anything about this pattern.
293 
294  NewMask = ConstantExpr::getTrunc(NewMask, NarrowestTy);
295 
296  // Does this mask has any unset bits? If not then we can just not apply it.
297  bool NeedMask = !match(NewMask, m_AllOnes());
298 
299  // If we need to apply a mask, there are several more restrictions we have.
300  if (NeedMask) {
301  // The old masking instruction must go away.
302  if (!Masked->hasOneUse())
303  return nullptr;
304  // The original "masking" instruction must not have been`ashr`.
305  if (match(Masked, m_AShr(m_Value(), m_Value())))
306  return nullptr;
307  }
308 
309  // If we need to apply truncation, let's do it first, since we can.
310  // We have already ensured that the old truncation will go away.
311  if (HadTrunc)
312  X = Builder.CreateTrunc(X, NarrowestTy);
313 
314  // No 'NUW'/'NSW'! We no longer know that we won't shift-out non-0 bits.
315  // We didn't change the Type of this outermost shift, so we can just do it.
316  auto *NewShift = BinaryOperator::Create(OuterShift->getOpcode(), X,
317  OuterShift->getOperand(1));
318  if (!NeedMask)
319  return NewShift;
320 
321  Builder.Insert(NewShift);
322  return BinaryOperator::Create(Instruction::And, NewShift, NewMask);
323 }
324 
325 /// If we have a shift-by-constant of a bitwise logic op that itself has a
326 /// shift-by-constant operand with identical opcode, we may be able to convert
327 /// that into 2 independent shifts followed by the logic op. This eliminates a
328 /// a use of an intermediate value (reduces dependency chain).
331  assert(I.isShift() && "Expected a shift as input");
332  auto *LogicInst = dyn_cast<BinaryOperator>(I.getOperand(0));
333  if (!LogicInst || !LogicInst->isBitwiseLogicOp() || !LogicInst->hasOneUse())
334  return nullptr;
335 
336  Constant *C0, *C1;
337  if (!match(I.getOperand(1), m_Constant(C1)))
338  return nullptr;
339 
340  Instruction::BinaryOps ShiftOpcode = I.getOpcode();
341  Type *Ty = I.getType();
342 
343  // Find a matching one-use shift by constant. The fold is not valid if the sum
344  // of the shift values equals or exceeds bitwidth.
345  // TODO: Remove the one-use check if the other logic operand (Y) is constant.
346  Value *X, *Y;
347  auto matchFirstShift = [&](Value *V) {
348  APInt Threshold(Ty->getScalarSizeInBits(), Ty->getScalarSizeInBits());
349  return match(V, m_BinOp(ShiftOpcode, m_Value(), m_Value())) &&
350  match(V, m_OneUse(m_Shift(m_Value(X), m_Constant(C0)))) &&
353  };
354 
355  // Logic ops are commutative, so check each operand for a match.
356  if (matchFirstShift(LogicInst->getOperand(0)))
357  Y = LogicInst->getOperand(1);
358  else if (matchFirstShift(LogicInst->getOperand(1)))
359  Y = LogicInst->getOperand(0);
360  else
361  return nullptr;
362 
363  // shift (logic (shift X, C0), Y), C1 -> logic (shift X, C0+C1), (shift Y, C1)
364  Constant *ShiftSumC = ConstantExpr::getAdd(C0, C1);
365  Value *NewShift1 = Builder.CreateBinOp(ShiftOpcode, X, ShiftSumC);
366  Value *NewShift2 = Builder.CreateBinOp(ShiftOpcode, Y, I.getOperand(1));
367  return BinaryOperator::Create(LogicInst->getOpcode(), NewShift1, NewShift2);
368 }
369 
371  if (Instruction *Phi = foldBinopWithPhiOperands(I))
372  return Phi;
373 
374  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
375  assert(Op0->getType() == Op1->getType());
376  Type *Ty = I.getType();
377 
378  // If the shift amount is a one-use `sext`, we can demote it to `zext`.
379  Value *Y;
380  if (match(Op1, m_OneUse(m_SExt(m_Value(Y))))) {
381  Value *NewExt = Builder.CreateZExt(Y, Ty, Op1->getName());
382  return BinaryOperator::Create(I.getOpcode(), Op0, NewExt);
383  }
384 
385  // See if we can fold away this shift.
386  if (SimplifyDemandedInstructionBits(I))
387  return &I;
388 
389  // Try to fold constant and into select arguments.
390  if (isa<Constant>(Op0))
391  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
392  if (Instruction *R = FoldOpIntoSelect(I, SI))
393  return R;
394 
395  if (Constant *CUI = dyn_cast<Constant>(Op1))
396  if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
397  return Res;
398 
399  if (auto *NewShift = cast_or_null<Instruction>(
400  reassociateShiftAmtsOfTwoSameDirectionShifts(&I, SQ)))
401  return NewShift;
402 
403  // Pre-shift a constant shifted by a variable amount with constant offset:
404  // C shift (A add nuw C1) --> (C shift C1) shift A
405  Value *A;
406  Constant *C, *C1;
407  if (match(Op0, m_Constant(C)) &&
408  match(Op1, m_NUWAdd(m_Value(A), m_Constant(C1)))) {
409  Value *NewC = Builder.CreateBinOp(I.getOpcode(), C, C1);
410  return BinaryOperator::Create(I.getOpcode(), NewC, A);
411  }
412 
413  unsigned BitWidth = Ty->getScalarSizeInBits();
414 
415  const APInt *AC, *AddC;
416  // Try to pre-shift a constant shifted by a variable amount added with a
417  // negative number:
418  // C << (X - AddC) --> (C >> AddC) << X
419  // and
420  // C >> (X - AddC) --> (C << AddC) >> X
421  if (match(Op0, m_APInt(AC)) && match(Op1, m_Add(m_Value(A), m_APInt(AddC))) &&
422  AddC->isNegative() && (-*AddC).ult(BitWidth)) {
423  assert(!AC->isZero() && "Expected simplify of shifted zero");
424  unsigned PosOffset = (-*AddC).getZExtValue();
425 
426  auto isSuitableForPreShift = [PosOffset, &I, AC]() {
427  switch (I.getOpcode()) {
428  default:
429  return false;
430  case Instruction::Shl:
431  return (I.hasNoSignedWrap() || I.hasNoUnsignedWrap()) &&
432  AC->eq(AC->lshr(PosOffset).shl(PosOffset));
433  case Instruction::LShr:
434  return I.isExact() && AC->eq(AC->shl(PosOffset).lshr(PosOffset));
435  case Instruction::AShr:
436  return I.isExact() && AC->eq(AC->shl(PosOffset).ashr(PosOffset));
437  }
438  };
439  if (isSuitableForPreShift()) {
440  Constant *NewC = ConstantInt::get(Ty, I.getOpcode() == Instruction::Shl
441  ? AC->lshr(PosOffset)
442  : AC->shl(PosOffset));
443  BinaryOperator *NewShiftOp =
444  BinaryOperator::Create(I.getOpcode(), NewC, A);
445  if (I.getOpcode() == Instruction::Shl) {
446  NewShiftOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
447  } else {
448  NewShiftOp->setIsExact();
449  }
450  return NewShiftOp;
451  }
452  }
453 
454  // X shift (A srem C) -> X shift (A and (C - 1)) iff C is a power of 2.
455  // Because shifts by negative values (which could occur if A were negative)
456  // are undefined.
457  if (Op1->hasOneUse() && match(Op1, m_SRem(m_Value(A), m_Constant(C))) &&
458  match(C, m_Power2())) {
459  // FIXME: Should this get moved into SimplifyDemandedBits by saying we don't
460  // demand the sign bit (and many others) here??
462  Value *Rem = Builder.CreateAnd(A, Mask, Op1->getName());
463  return replaceOperand(I, 1, Rem);
464  }
465 
467  return Logic;
468 
469  return nullptr;
470 }
471 
472 /// Return true if we can simplify two logical (either left or right) shifts
473 /// that have constant shift amounts: OuterShift (InnerShift X, C1), C2.
474 static bool canEvaluateShiftedShift(unsigned OuterShAmt, bool IsOuterShl,
475  Instruction *InnerShift,
476  InstCombinerImpl &IC, Instruction *CxtI) {
477  assert(InnerShift->isLogicalShift() && "Unexpected instruction type");
478 
479  // We need constant scalar or constant splat shifts.
480  const APInt *InnerShiftConst;
481  if (!match(InnerShift->getOperand(1), m_APInt(InnerShiftConst)))
482  return false;
483 
484  // Two logical shifts in the same direction:
485  // shl (shl X, C1), C2 --> shl X, C1 + C2
486  // lshr (lshr X, C1), C2 --> lshr X, C1 + C2
487  bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl;
488  if (IsInnerShl == IsOuterShl)
489  return true;
490 
491  // Equal shift amounts in opposite directions become bitwise 'and':
492  // lshr (shl X, C), C --> and X, C'
493  // shl (lshr X, C), C --> and X, C'
494  if (*InnerShiftConst == OuterShAmt)
495  return true;
496 
497  // If the 2nd shift is bigger than the 1st, we can fold:
498  // lshr (shl X, C1), C2 --> and (shl X, C1 - C2), C3
499  // shl (lshr X, C1), C2 --> and (lshr X, C1 - C2), C3
500  // but it isn't profitable unless we know the and'd out bits are already zero.
501  // Also, check that the inner shift is valid (less than the type width) or
502  // we'll crash trying to produce the bit mask for the 'and'.
503  unsigned TypeWidth = InnerShift->getType()->getScalarSizeInBits();
504  if (InnerShiftConst->ugt(OuterShAmt) && InnerShiftConst->ult(TypeWidth)) {
505  unsigned InnerShAmt = InnerShiftConst->getZExtValue();
506  unsigned MaskShift =
507  IsInnerShl ? TypeWidth - InnerShAmt : InnerShAmt - OuterShAmt;
508  APInt Mask = APInt::getLowBitsSet(TypeWidth, OuterShAmt) << MaskShift;
509  if (IC.MaskedValueIsZero(InnerShift->getOperand(0), Mask, 0, CxtI))
510  return true;
511  }
512 
513  return false;
514 }
515 
516 /// See if we can compute the specified value, but shifted logically to the left
517 /// or right by some number of bits. This should return true if the expression
518 /// can be computed for the same cost as the current expression tree. This is
519 /// used to eliminate extraneous shifting from things like:
520 /// %C = shl i128 %A, 64
521 /// %D = shl i128 %B, 96
522 /// %E = or i128 %C, %D
523 /// %F = lshr i128 %E, 64
524 /// where the client will ask if E can be computed shifted right by 64-bits. If
525 /// this succeeds, getShiftedValue() will be called to produce the value.
526 static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift,
527  InstCombinerImpl &IC, Instruction *CxtI) {
528  // We can always evaluate constants shifted.
529  if (isa<Constant>(V))
530  return true;
531 
532  Instruction *I = dyn_cast<Instruction>(V);
533  if (!I) return false;
534 
535  // We can't mutate something that has multiple uses: doing so would
536  // require duplicating the instruction in general, which isn't profitable.
537  if (!I->hasOneUse()) return false;
538 
539  switch (I->getOpcode()) {
540  default: return false;
541  case Instruction::And:
542  case Instruction::Or:
543  case Instruction::Xor:
544  // Bitwise operators can all arbitrarily be arbitrarily evaluated shifted.
545  return canEvaluateShifted(I->getOperand(0), NumBits, IsLeftShift, IC, I) &&
546  canEvaluateShifted(I->getOperand(1), NumBits, IsLeftShift, IC, I);
547 
548  case Instruction::Shl:
549  case Instruction::LShr:
550  return canEvaluateShiftedShift(NumBits, IsLeftShift, I, IC, CxtI);
551 
552  case Instruction::Select: {
553  SelectInst *SI = cast<SelectInst>(I);
554  Value *TrueVal = SI->getTrueValue();
555  Value *FalseVal = SI->getFalseValue();
556  return canEvaluateShifted(TrueVal, NumBits, IsLeftShift, IC, SI) &&
557  canEvaluateShifted(FalseVal, NumBits, IsLeftShift, IC, SI);
558  }
559  case Instruction::PHI: {
560  // We can change a phi if we can change all operands. Note that we never
561  // get into trouble with cyclic PHIs here because we only consider
562  // instructions with a single use.
563  PHINode *PN = cast<PHINode>(I);
564  for (Value *IncValue : PN->incoming_values())
565  if (!canEvaluateShifted(IncValue, NumBits, IsLeftShift, IC, PN))
566  return false;
567  return true;
568  }
569  case Instruction::Mul: {
570  const APInt *MulConst;
571  // We can fold (shr (mul X, -(1 << C)), C) -> (and (neg X), C`)
572  return !IsLeftShift && match(I->getOperand(1), m_APInt(MulConst)) &&
573  MulConst->isNegatedPowerOf2() &&
574  MulConst->countTrailingZeros() == NumBits;
575  }
576  }
577 }
578 
579 /// Fold OuterShift (InnerShift X, C1), C2.
580 /// See canEvaluateShiftedShift() for the constraints on these instructions.
581 static Value *foldShiftedShift(BinaryOperator *InnerShift, unsigned OuterShAmt,
582  bool IsOuterShl,
584  bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl;
585  Type *ShType = InnerShift->getType();
586  unsigned TypeWidth = ShType->getScalarSizeInBits();
587 
588  // We only accept shifts-by-a-constant in canEvaluateShifted().
589  const APInt *C1;
590  match(InnerShift->getOperand(1), m_APInt(C1));
591  unsigned InnerShAmt = C1->getZExtValue();
592 
593  // Change the shift amount and clear the appropriate IR flags.
594  auto NewInnerShift = [&](unsigned ShAmt) {
595  InnerShift->setOperand(1, ConstantInt::get(ShType, ShAmt));
596  if (IsInnerShl) {
597  InnerShift->setHasNoUnsignedWrap(false);
598  InnerShift->setHasNoSignedWrap(false);
599  } else {
600  InnerShift->setIsExact(false);
601  }
602  return InnerShift;
603  };
604 
605  // Two logical shifts in the same direction:
606  // shl (shl X, C1), C2 --> shl X, C1 + C2
607  // lshr (lshr X, C1), C2 --> lshr X, C1 + C2
608  if (IsInnerShl == IsOuterShl) {
609  // If this is an oversized composite shift, then unsigned shifts get 0.
610  if (InnerShAmt + OuterShAmt >= TypeWidth)
611  return Constant::getNullValue(ShType);
612 
613  return NewInnerShift(InnerShAmt + OuterShAmt);
614  }
615 
616  // Equal shift amounts in opposite directions become bitwise 'and':
617  // lshr (shl X, C), C --> and X, C'
618  // shl (lshr X, C), C --> and X, C'
619  if (InnerShAmt == OuterShAmt) {
620  APInt Mask = IsInnerShl
621  ? APInt::getLowBitsSet(TypeWidth, TypeWidth - OuterShAmt)
622  : APInt::getHighBitsSet(TypeWidth, TypeWidth - OuterShAmt);
623  Value *And = Builder.CreateAnd(InnerShift->getOperand(0),
624  ConstantInt::get(ShType, Mask));
625  if (auto *AndI = dyn_cast<Instruction>(And)) {
626  AndI->moveBefore(InnerShift);
627  AndI->takeName(InnerShift);
628  }
629  return And;
630  }
631 
632  assert(InnerShAmt > OuterShAmt &&
633  "Unexpected opposite direction logical shift pair");
634 
635  // In general, we would need an 'and' for this transform, but
636  // canEvaluateShiftedShift() guarantees that the masked-off bits are not used.
637  // lshr (shl X, C1), C2 --> shl X, C1 - C2
638  // shl (lshr X, C1), C2 --> lshr X, C1 - C2
639  return NewInnerShift(InnerShAmt - OuterShAmt);
640 }
641 
642 /// When canEvaluateShifted() returns true for an expression, this function
643 /// inserts the new computation that produces the shifted value.
644 static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
645  InstCombinerImpl &IC, const DataLayout &DL) {
646  // We can always evaluate constants shifted.
647  if (Constant *C = dyn_cast<Constant>(V)) {
648  if (isLeftShift)
649  return IC.Builder.CreateShl(C, NumBits);
650  else
651  return IC.Builder.CreateLShr(C, NumBits);
652  }
653 
654  Instruction *I = cast<Instruction>(V);
655  IC.addToWorklist(I);
656 
657  switch (I->getOpcode()) {
658  default: llvm_unreachable("Inconsistency with CanEvaluateShifted");
659  case Instruction::And:
660  case Instruction::Or:
661  case Instruction::Xor:
662  // Bitwise operators can all arbitrarily be arbitrarily evaluated shifted.
663  I->setOperand(
664  0, getShiftedValue(I->getOperand(0), NumBits, isLeftShift, IC, DL));
665  I->setOperand(
666  1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
667  return I;
668 
669  case Instruction::Shl:
670  case Instruction::LShr:
671  return foldShiftedShift(cast<BinaryOperator>(I), NumBits, isLeftShift,
672  IC.Builder);
673 
674  case Instruction::Select:
675  I->setOperand(
676  1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
677  I->setOperand(
678  2, getShiftedValue(I->getOperand(2), NumBits, isLeftShift, IC, DL));
679  return I;
680  case Instruction::PHI: {
681  // We can change a phi if we can change all operands. Note that we never
682  // get into trouble with cyclic PHIs here because we only consider
683  // instructions with a single use.
684  PHINode *PN = cast<PHINode>(I);
685  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
687  isLeftShift, IC, DL));
688  return PN;
689  }
690  case Instruction::Mul: {
691  assert(!isLeftShift && "Unexpected shift direction!");
692  auto *Neg = BinaryOperator::CreateNeg(I->getOperand(0));
693  IC.InsertNewInstWith(Neg, *I);
694  unsigned TypeWidth = I->getType()->getScalarSizeInBits();
695  APInt Mask = APInt::getLowBitsSet(TypeWidth, TypeWidth - NumBits);
696  auto *And = BinaryOperator::CreateAnd(Neg,
697  ConstantInt::get(I->getType(), Mask));
698  And->takeName(I);
699  return IC.InsertNewInstWith(And, *I);
700  }
701  }
702 }
703 
704 // If this is a bitwise operator or add with a constant RHS we might be able
705 // to pull it through a shift.
707  BinaryOperator *BO) {
708  switch (BO->getOpcode()) {
709  default:
710  return false; // Do not perform transform!
711  case Instruction::Add:
712  return Shift.getOpcode() == Instruction::Shl;
713  case Instruction::Or:
714  case Instruction::And:
715  return true;
716  case Instruction::Xor:
717  // Do not change a 'not' of logical shift because that would create a normal
718  // 'xor'. The 'not' is likely better for analysis, SCEV, and codegen.
719  return !(Shift.isLogicalShift() && match(BO, m_Not(m_Value())));
720  }
721 }
722 
724  BinaryOperator &I) {
725  // (C2 << X) << C1 --> (C2 << C1) << X
726  // (C2 >> X) >> C1 --> (C2 >> C1) >> X
727  Constant *C2;
728  Value *X;
729  if (match(Op0, m_BinOp(I.getOpcode(), m_Constant(C2), m_Value(X))))
730  return BinaryOperator::Create(
731  I.getOpcode(), Builder.CreateBinOp(I.getOpcode(), C2, C1), X);
732 
733  bool IsLeftShift = I.getOpcode() == Instruction::Shl;
734  Type *Ty = I.getType();
735  unsigned TypeBits = Ty->getScalarSizeInBits();
736 
737  // (X / +DivC) >> (Width - 1) --> ext (X <= -DivC)
738  // (X / -DivC) >> (Width - 1) --> ext (X >= +DivC)
739  const APInt *DivC;
740  if (!IsLeftShift && match(C1, m_SpecificIntAllowUndef(TypeBits - 1)) &&
741  match(Op0, m_SDiv(m_Value(X), m_APInt(DivC))) && !DivC->isZero() &&
742  !DivC->isMinSignedValue()) {
743  Constant *NegDivC = ConstantInt::get(Ty, -(*DivC));
744  ICmpInst::Predicate Pred =
746  Value *Cmp = Builder.CreateICmp(Pred, X, NegDivC);
747  auto ExtOpcode = (I.getOpcode() == Instruction::AShr) ? Instruction::SExt
748  : Instruction::ZExt;
749  return CastInst::Create(ExtOpcode, Cmp, Ty);
750  }
751 
752  const APInt *Op1C;
753  if (!match(C1, m_APInt(Op1C)))
754  return nullptr;
755 
756  assert(!Op1C->uge(TypeBits) &&
757  "Shift over the type width should have been removed already");
758 
759  // See if we can propagate this shift into the input, this covers the trivial
760  // cast of lshr(shl(x,c1),c2) as well as other more complex cases.
761  if (I.getOpcode() != Instruction::AShr &&
762  canEvaluateShifted(Op0, Op1C->getZExtValue(), IsLeftShift, *this, &I)) {
763  LLVM_DEBUG(
764  dbgs() << "ICE: GetShiftedValue propagating shift through expression"
765  " to eliminate shift:\n IN: "
766  << *Op0 << "\n SH: " << I << "\n");
767 
768  return replaceInstUsesWith(
769  I, getShiftedValue(Op0, Op1C->getZExtValue(), IsLeftShift, *this, DL));
770  }
771 
772  if (Instruction *FoldedShift = foldBinOpIntoSelectOrPhi(I))
773  return FoldedShift;
774 
775  if (!Op0->hasOneUse())
776  return nullptr;
777 
778  if (auto *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
779  // If the operand is a bitwise operator with a constant RHS, and the
780  // shift is the only use, we can pull it out of the shift.
781  const APInt *Op0C;
782  if (match(Op0BO->getOperand(1), m_APInt(Op0C))) {
783  if (canShiftBinOpWithConstantRHS(I, Op0BO)) {
784  Value *NewRHS =
785  Builder.CreateBinOp(I.getOpcode(), Op0BO->getOperand(1), C1);
786 
787  Value *NewShift =
788  Builder.CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), C1);
789  NewShift->takeName(Op0BO);
790 
791  return BinaryOperator::Create(Op0BO->getOpcode(), NewShift, NewRHS);
792  }
793  }
794  }
795 
796  // If we have a select that conditionally executes some binary operator,
797  // see if we can pull it the select and operator through the shift.
798  //
799  // For example, turning:
800  // shl (select C, (add X, C1), X), C2
801  // Into:
802  // Y = shl X, C2
803  // select C, (add Y, C1 << C2), Y
804  Value *Cond;
805  BinaryOperator *TBO;
806  Value *FalseVal;
807  if (match(Op0, m_Select(m_Value(Cond), m_OneUse(m_BinOp(TBO)),
808  m_Value(FalseVal)))) {
809  const APInt *C;
810  if (!isa<Constant>(FalseVal) && TBO->getOperand(0) == FalseVal &&
811  match(TBO->getOperand(1), m_APInt(C)) &&
813  Value *NewRHS =
814  Builder.CreateBinOp(I.getOpcode(), TBO->getOperand(1), C1);
815 
816  Value *NewShift = Builder.CreateBinOp(I.getOpcode(), FalseVal, C1);
817  Value *NewOp = Builder.CreateBinOp(TBO->getOpcode(), NewShift, NewRHS);
818  return SelectInst::Create(Cond, NewOp, NewShift);
819  }
820  }
821 
822  BinaryOperator *FBO;
823  Value *TrueVal;
825  m_OneUse(m_BinOp(FBO))))) {
826  const APInt *C;
827  if (!isa<Constant>(TrueVal) && FBO->getOperand(0) == TrueVal &&
828  match(FBO->getOperand(1), m_APInt(C)) &&
830  Value *NewRHS =
831  Builder.CreateBinOp(I.getOpcode(), FBO->getOperand(1), C1);
832 
833  Value *NewShift = Builder.CreateBinOp(I.getOpcode(), TrueVal, C1);
834  Value *NewOp = Builder.CreateBinOp(FBO->getOpcode(), NewShift, NewRHS);
835  return SelectInst::Create(Cond, NewShift, NewOp);
836  }
837  }
838 
839  return nullptr;
840 }
841 
// NOTE(review): this is a doxygen-extracted listing. The numbers embedded at
// the start of each line are the ORIGINAL file line numbers; gaps in that
// numbering (e.g. 855, 878, 914) mark source lines the extraction dropped,
// including this function's signature line (842). From the body (it starts
// with simplifyShlInst and sets shl-specific nuw/nsw flags) this is
// presumably `Instruction *InstCombinerImpl::visitShl(BinaryOperator &I)` —
// confirm against upstream before relying on the name.
//
// Canonicalizes `shl`: try InstSimplify first, then generic vector/shift
// transforms, then a sequence of shl-specific folds, most of which require a
// constant shift amount (the big `match(Op1, m_APInt(C))` region).
843  const SimplifyQuery Q = SQ.getWithInstruction(&I);
844 
845  if (Value *V = simplifyShlInst(I.getOperand(0), I.getOperand(1),
846  I.hasNoSignedWrap(), I.hasNoUnsignedWrap(), Q))
847  return replaceInstUsesWith(I, V);
848 
849  if (Instruction *X = foldVectorBinop(I))
850  return X;
851 
852  if (Instruction *V = commonShiftTransforms(I))
853  return V;
854 
  // NOTE(review): line 855 (the `if (...)` producing this V) was dropped by
  // the extraction — this `return V` is guarded in the original source.
856  return V;
857 
858  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
859  Type *Ty = I.getType();
860  unsigned BitWidth = Ty->getScalarSizeInBits();
861 
862  const APInt *C;
863  if (match(Op1, m_APInt(C))) {
864  unsigned ShAmtC = C->getZExtValue();
865 
866  // shl (zext X), C --> zext (shl X, C)
867  // This is only valid if X would have zeros shifted out.
868  Value *X;
869  if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
870  unsigned SrcWidth = X->getType()->getScalarSizeInBits();
871  if (ShAmtC < SrcWidth &&
872  MaskedValueIsZero(X, APInt::getHighBitsSet(SrcWidth, ShAmtC), 0, &I))
873  return new ZExtInst(Builder.CreateShl(X, ShAmtC), Ty);
874  }
875 
876  // (X >> C) << C --> X & (-1 << C)
  // NOTE(review): line 878 (the computation of `Mask`) was dropped by the
  // extraction.
877  if (match(Op0, m_Shr(m_Value(X), m_Specific(Op1)))) {
879  return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
880  }
881 
882  const APInt *C1;
883  if (match(Op0, m_Exact(m_Shr(m_Value(X), m_APInt(C1)))) &&
884  C1->ult(BitWidth)) {
885  unsigned ShrAmt = C1->getZExtValue();
886  if (ShrAmt < ShAmtC) {
887  // If C1 < C: (X >>?,exact C1) << C --> X << (C - C1)
888  Constant *ShiftDiff = ConstantInt::get(Ty, ShAmtC - ShrAmt);
889  auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
890  NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
891  NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
892  return NewShl;
893  }
894  if (ShrAmt > ShAmtC) {
895  // If C1 > C: (X >>?exact C1) << C --> X >>?exact (C1 - C)
896  Constant *ShiftDiff = ConstantInt::get(Ty, ShrAmt - ShAmtC);
897  auto *NewShr = BinaryOperator::Create(
898  cast<BinaryOperator>(Op0)->getOpcode(), X, ShiftDiff);
899  NewShr->setIsExact(true);
900  return NewShr;
901  }
902  }
903 
904  if (match(Op0, m_OneUse(m_Shr(m_Value(X), m_APInt(C1)))) &&
905  C1->ult(BitWidth)) {
906  unsigned ShrAmt = C1->getZExtValue();
907  if (ShrAmt < ShAmtC) {
908  // If C1 < C: (X >>? C1) << C --> (X << (C - C1)) & (-1 << C)
909  Constant *ShiftDiff = ConstantInt::get(Ty, ShAmtC - ShrAmt);
910  auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
911  NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
912  NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
913  Builder.Insert(NewShl);
  // NOTE(review): line 914 (the computation of `Mask`) was dropped.
915  return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
916  }
917  if (ShrAmt > ShAmtC) {
918  // If C1 > C: (X >>? C1) << C --> (X >>? (C1 - C)) & (-1 << C)
919  Constant *ShiftDiff = ConstantInt::get(Ty, ShrAmt - ShAmtC);
920  auto *OldShr = cast<BinaryOperator>(Op0);
921  auto *NewShr =
922  BinaryOperator::Create(OldShr->getOpcode(), X, ShiftDiff);
923  NewShr->setIsExact(OldShr->isExact());
924  Builder.Insert(NewShr);
  // NOTE(review): line 925 (the computation of `Mask`) was dropped.
926  return BinaryOperator::CreateAnd(NewShr, ConstantInt::get(Ty, Mask));
927  }
928  }
929 
930  // Similar to above, but look through an intermediate trunc instruction.
931  BinaryOperator *Shr;
932  if (match(Op0, m_OneUse(m_Trunc(m_OneUse(m_BinOp(Shr))))) &&
933  match(Shr, m_Shr(m_Value(X), m_APInt(C1)))) {
934  // The larger shift direction survives through the transform.
935  unsigned ShrAmtC = C1->getZExtValue();
936  unsigned ShDiff = ShrAmtC > ShAmtC ? ShrAmtC - ShAmtC : ShAmtC - ShrAmtC;
937  Constant *ShiftDiffC = ConstantInt::get(X->getType(), ShDiff);
938  auto ShiftOpc = ShrAmtC > ShAmtC ? Shr->getOpcode() : Instruction::Shl;
939 
940  // If C1 > C:
941  // (trunc (X >> C1)) << C --> (trunc (X >> (C1 - C))) & (-1 << C)
942  // If C > C1:
943  // (trunc (X >> C1)) << C --> (trunc (X << (C - C1))) & (-1 << C)
944  Value *NewShift = Builder.CreateBinOp(ShiftOpc, X, ShiftDiffC, "sh.diff");
945  Value *Trunc = Builder.CreateTrunc(NewShift, Ty, "tr.sh.diff");
  // NOTE(review): line 946 (the computation of `Mask`) was dropped.
947  return BinaryOperator::CreateAnd(Trunc, ConstantInt::get(Ty, Mask));
948  }
949 
950  if (match(Op0, m_Shl(m_Value(X), m_APInt(C1))) && C1->ult(BitWidth)) {
951  unsigned AmtSum = ShAmtC + C1->getZExtValue();
952  // Oversized shifts are simplified to zero in InstSimplify.
953  if (AmtSum < BitWidth)
954  // (X << C1) << C2 --> X << (C1 + C2)
955  return BinaryOperator::CreateShl(X, ConstantInt::get(Ty, AmtSum));
956  }
957 
958  // If we have an opposite shift by the same amount, we may be able to
959  // reorder binops and shifts to eliminate math/logic.
960  auto isSuitableBinOpcode = [](Instruction::BinaryOps BinOpcode) {
961  switch (BinOpcode) {
962  default:
963  return false;
964  case Instruction::Add:
965  case Instruction::And:
966  case Instruction::Or:
967  case Instruction::Xor:
968  case Instruction::Sub:
969  // NOTE: Sub is not commutable and the transforms below may not be valid
970  // when the shift-right is operand 1 (RHS) of the sub.
971  return true;
972  }
973  };
974  BinaryOperator *Op0BO;
975  if (match(Op0, m_OneUse(m_BinOp(Op0BO))) &&
976  isSuitableBinOpcode(Op0BO->getOpcode())) {
977  // Commute so shift-right is on LHS of the binop.
978  // (Y bop (X >> C)) << C -> ((X >> C) bop Y) << C
979  // (Y bop ((X >> C) & CC)) << C -> (((X >> C) & CC) bop Y) << C
980  Value *Shr = Op0BO->getOperand(0);
981  Value *Y = Op0BO->getOperand(1);
982  Value *X;
983  const APInt *CC;
984  if (Op0BO->isCommutative() && Y->hasOneUse() &&
985  (match(Y, m_Shr(m_Value(), m_Specific(Op1))) ||
  // NOTE(review): line 986 (start of the second match alternative) was
  // dropped by the extraction.
987  m_APInt(CC)))))
988  std::swap(Shr, Y);
989 
990  // ((X >> C) bop Y) << C -> (X bop (Y << C)) & (~0 << C)
991  if (match(Shr, m_OneUse(m_Shr(m_Value(X), m_Specific(Op1))))) {
992  // Y << C
993  Value *YS = Builder.CreateShl(Y, Op1, Op0BO->getName());
994  // (X bop (Y << C))
995  Value *B =
996  Builder.CreateBinOp(Op0BO->getOpcode(), X, YS, Shr->getName());
997  unsigned Op1Val = C->getLimitedValue(BitWidth);
  // NOTE(review): lines 998-999 (the computation of `Mask` from Op1Val)
  // were dropped by the extraction.
1000  return BinaryOperator::CreateAnd(B, Mask);
1001  }
1002 
1003  // (((X >> C) & CC) bop Y) << C -> (X & (CC << C)) bop (Y << C)
1004  if (match(Shr,
  // NOTE(review): line 1005 (interior of this match pattern) was dropped.
1006  m_APInt(CC))))) {
1007  // Y << C
1008  Value *YS = Builder.CreateShl(Y, Op1, Op0BO->getName());
1009  // X & (CC << C)
1010  Value *M = Builder.CreateAnd(X, ConstantInt::get(Ty, CC->shl(*C)),
1011  X->getName() + ".mask");
1012  return BinaryOperator::Create(Op0BO->getOpcode(), M, YS);
1013  }
1014  }
1015 
1016  // (C1 - X) << C --> (C1 << C) - (X << C)
1017  if (match(Op0, m_OneUse(m_Sub(m_APInt(C1), m_Value(X))))) {
1018  Constant *NewLHS = ConstantInt::get(Ty, C1->shl(*C));
1019  Value *NewShift = Builder.CreateShl(X, Op1);
1020  return BinaryOperator::CreateSub(NewLHS, NewShift);
1021  }
1022 
1023  // If the shifted-out value is known-zero, then this is a NUW shift.
1024  if (!I.hasNoUnsignedWrap() &&
  // NOTE(review): line 1025 (the MaskedValueIsZero call head) was dropped.
1026  &I)) {
1027  I.setHasNoUnsignedWrap();
1028  return &I;
1029  }
1030 
1031  // If the shifted-out value is all signbits, then this is a NSW shift.
1032  if (!I.hasNoSignedWrap() && ComputeNumSignBits(Op0, 0, &I) > ShAmtC) {
1033  I.setHasNoSignedWrap();
1034  return &I;
1035  }
1036  }
1037 
1038  // Transform (x >> y) << y to x & (-1 << y)
1039  // Valid for any type of right-shift.
1040  Value *X;
1041  if (match(Op0, m_OneUse(m_Shr(m_Value(X), m_Specific(Op1))))) {
1042  Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
1043  Value *Mask = Builder.CreateShl(AllOnes, Op1);
1044  return BinaryOperator::CreateAnd(Mask, X);
1045  }
1046 
1047  Constant *C1;
1048  if (match(Op1, m_Constant(C1))) {
1049  Constant *C2;
1050  Value *X;
1051  // (X * C2) << C1 --> X * (C2 << C1)
1052  if (match(Op0, m_Mul(m_Value(X), m_Constant(C2))))
  // NOTE(review): line 1053 (the return statement for this fold) was
  // dropped by the extraction.
1054 
1055  // shl (zext i1 X), C1 --> select (X, 1 << C1, 0)
1056  if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
1057  auto *NewC = ConstantExpr::getShl(ConstantInt::get(Ty, 1), C1);
1058  return SelectInst::Create(X, NewC, ConstantInt::getNullValue(Ty));
1059  }
1060  }
1061 
1062  // (1 << (C - x)) -> ((1 << C) >> x) if C is bitwidth - 1
1063  if (match(Op0, m_One()) &&
1064  match(Op1, m_Sub(m_SpecificInt(BitWidth - 1), m_Value(X))))
1065  return BinaryOperator::CreateLShr(
  // NOTE(review): line 1066 (the arguments of this CreateLShr call) was
  // dropped by the extraction.
1067 
1068  return nullptr;
1069 }
1070 
// NOTE(review): doxygen-extracted listing; the embedded numbers are the
// original file line numbers and gaps mark lines dropped by the extraction,
// including this function's signature line (1071). From the body (it starts
// with simplifyLShrInst) this is presumably
// `Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I)` — confirm
// against upstream before relying on the name.
//
// Canonicalizes `lshr`: InstSimplify first, then generic transforms, then
// lshr-specific folds (constant shift amounts in the m_APInt(C) region,
// followed by folds that work for a variable shift amount).
1072  if (Value *V = simplifyLShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
1073  SQ.getWithInstruction(&I)))
1074  return replaceInstUsesWith(I, V);
1075 
1076  if (Instruction *X = foldVectorBinop(I))
1077  return X;
1078 
1079  if (Instruction *R = commonShiftTransforms(I))
1080  return R;
1081 
1082  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1083  Type *Ty = I.getType();
1084  const APInt *C;
1085  if (match(Op1, m_APInt(C))) {
1086  unsigned ShAmtC = C->getZExtValue();
1087  unsigned BitWidth = Ty->getScalarSizeInBits();
1088  auto *II = dyn_cast<IntrinsicInst>(Op0);
1089  if (II && isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == ShAmtC &&
1090  (II->getIntrinsicID() == Intrinsic::ctlz ||
1091  II->getIntrinsicID() == Intrinsic::cttz ||
1092  II->getIntrinsicID() == Intrinsic::ctpop)) {
1093  // ctlz.i32(x)>>5 --> zext(x == 0)
1094  // cttz.i32(x)>>5 --> zext(x == 0)
1095  // ctpop.i32(x)>>5 --> zext(x == -1)
1096  bool IsPop = II->getIntrinsicID() == Intrinsic::ctpop;
1097  Constant *RHS = ConstantInt::getSigned(Ty, IsPop ? -1 : 0);
1098  Value *Cmp = Builder.CreateICmpEQ(II->getArgOperand(0), RHS);
1099  return new ZExtInst(Cmp, Ty);
1100  }
1101 
1102  Value *X;
1103  const APInt *C1;
1104  if (match(Op0, m_Shl(m_Value(X), m_APInt(C1))) && C1->ult(BitWidth)) {
1105  if (C1->ult(ShAmtC)) {
1106  unsigned ShlAmtC = C1->getZExtValue();
1107  Constant *ShiftDiff = ConstantInt::get(Ty, ShAmtC - ShlAmtC);
1108  if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
1109  // (X <<nuw C1) >>u C --> X >>u (C - C1)
1110  auto *NewLShr = BinaryOperator::CreateLShr(X, ShiftDiff);
1111  NewLShr->setIsExact(I.isExact());
1112  return NewLShr;
1113  }
1114  if (Op0->hasOneUse()) {
1115  // (X << C1) >>u C --> (X >>u (C - C1)) & (-1 >> C)
1116  Value *NewLShr = Builder.CreateLShr(X, ShiftDiff, "", I.isExact());
  // NOTE(review): line 1117 (the computation of `Mask`) was dropped by
  // the extraction.
1118  return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask));
1119  }
1120  } else if (C1->ugt(ShAmtC)) {
1121  unsigned ShlAmtC = C1->getZExtValue();
1122  Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmtC - ShAmtC);
1123  if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
1124  // (X <<nuw C1) >>u C --> X <<nuw (C1 - C)
1125  auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
1126  NewShl->setHasNoUnsignedWrap(true);
1127  return NewShl;
1128  }
1129  if (Op0->hasOneUse()) {
1130  // (X << C1) >>u C --> X << (C1 - C) & (-1 >> C)
1131  Value *NewShl = Builder.CreateShl(X, ShiftDiff);
  // NOTE(review): line 1132 (the computation of `Mask`) was dropped.
1133  return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
1134  }
1135  } else {
1136  assert(*C1 == ShAmtC);
1137  // (X << C) >>u C --> X & (-1 >>u C)
  // NOTE(review): line 1138 (the computation of `Mask`) was dropped.
1139  return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
1140  }
1141  }
1142 
1143  // ((X << C) + Y) >>u C --> (X + (Y >>u C)) & (-1 >>u C)
1144  // TODO: Consolidate with the more general transform that starts from shl
1145  // (the shifts are in the opposite order).
1146  Value *Y;
1147  if (match(Op0,
  // NOTE(review): line 1148 (interior of this match pattern) was dropped.
1149  m_Value(Y))))) {
1150  Value *NewLshr = Builder.CreateLShr(Y, Op1);
1151  Value *NewAdd = Builder.CreateAdd(NewLshr, X);
1152  unsigned Op1Val = C->getLimitedValue(BitWidth);
  // NOTE(review): lines 1153-1154 (the computation of `Mask` from Op1Val)
  // were dropped by the extraction.
1155  return BinaryOperator::CreateAnd(NewAdd, Mask);
1156  }
1157 
1158  if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) &&
1159  (!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType()))) {
1160  assert(ShAmtC < X->getType()->getScalarSizeInBits() &&
1161  "Big shift not simplified to zero?");
1162  // lshr (zext iM X to iN), C --> zext (lshr X, C) to iN
1163  Value *NewLShr = Builder.CreateLShr(X, ShAmtC);
1164  return new ZExtInst(NewLShr, Ty);
1165  }
1166 
1167  if (match(Op0, m_SExt(m_Value(X)))) {
1168  unsigned SrcTyBitWidth = X->getType()->getScalarSizeInBits();
1169  // lshr (sext i1 X to iN), C --> select (X, -1 >> C, 0)
1170  if (SrcTyBitWidth == 1) {
1171  auto *NewC = ConstantInt::get(
1172  Ty, APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
1173  return SelectInst::Create(X, NewC, ConstantInt::getNullValue(Ty));
1174  }
1175 
1176  if ((!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType())) &&
1177  Op0->hasOneUse()) {
1178  // Are we moving the sign bit to the low bit and widening with high
1179  // zeros? lshr (sext iM X to iN), N-1 --> zext (lshr X, M-1) to iN
1180  if (ShAmtC == BitWidth - 1) {
1181  Value *NewLShr = Builder.CreateLShr(X, SrcTyBitWidth - 1);
1182  return new ZExtInst(NewLShr, Ty);
1183  }
1184 
1185  // lshr (sext iM X to iN), N-M --> zext (ashr X, min(N-M, M-1)) to iN
1186  if (ShAmtC == BitWidth - SrcTyBitWidth) {
1187  // The new shift amount can't be more than the narrow source type.
1188  unsigned NewShAmt = std::min(ShAmtC, SrcTyBitWidth - 1);
1189  Value *AShr = Builder.CreateAShr(X, NewShAmt);
1190  return new ZExtInst(AShr, Ty);
1191  }
1192  }
1193  }
1194 
1195  if (ShAmtC == BitWidth - 1) {
1196  // lshr i32 or(X,-X), 31 --> zext (X != 0)
1197  if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
1198  return new ZExtInst(Builder.CreateIsNotNull(X), Ty);
1199 
1200  // lshr i32 (X -nsw Y), 31 --> zext (X < Y)
1201  if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
1202  return new ZExtInst(Builder.CreateICmpSLT(X, Y), Ty);
1203 
1204  // Check if a number is negative and odd:
1205  // lshr i32 (srem X, 2), 31 --> and (X >> 31), X
1206  if (match(Op0, m_OneUse(m_SRem(m_Value(X), m_SpecificInt(2))))) {
1207  Value *Signbit = Builder.CreateLShr(X, ShAmtC);
1208  return BinaryOperator::CreateAnd(Signbit, X);
1209  }
1210  }
1211 
1212  // (X >>u C1) >>u C --> X >>u (C1 + C)
1213  if (match(Op0, m_LShr(m_Value(X), m_APInt(C1)))) {
1214  // Oversized shifts are simplified to zero in InstSimplify.
1215  unsigned AmtSum = ShAmtC + C1->getZExtValue();
1216  if (AmtSum < BitWidth)
1217  return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
1218  }
1219 
1220  Instruction *TruncSrc;
1221  if (match(Op0, m_OneUse(m_Trunc(m_Instruction(TruncSrc)))) &&
1222  match(TruncSrc, m_LShr(m_Value(X), m_APInt(C1)))) {
1223  unsigned SrcWidth = X->getType()->getScalarSizeInBits();
1224  unsigned AmtSum = ShAmtC + C1->getZExtValue();
1225 
1226  // If the combined shift fits in the source width:
1227  // (trunc (X >>u C1)) >>u C --> and (trunc (X >>u (C1 + C)), MaskC
1228  //
1229  // If the first shift covers the number of bits truncated, then the
1230  // mask instruction is eliminated (and so the use check is relaxed).
1231  if (AmtSum < SrcWidth &&
1232  (TruncSrc->hasOneUse() || C1->uge(SrcWidth - BitWidth))) {
1233  Value *SumShift = Builder.CreateLShr(X, AmtSum, "sum.shift");
1234  Value *Trunc = Builder.CreateTrunc(SumShift, Ty, I.getName());
1235 
1236  // If the first shift does not cover the number of bits truncated, then
1237  // we require a mask to get rid of high bits in the result.
1238  APInt MaskC = APInt::getAllOnes(BitWidth).lshr(ShAmtC);
1239  return BinaryOperator::CreateAnd(Trunc, ConstantInt::get(Ty, MaskC));
1240  }
1241  }
1242 
1243  const APInt *MulC;
1244  if (match(Op0, m_NUWMul(m_Value(X), m_APInt(MulC)))) {
1245  // Look for a "splat" mul pattern - it replicates bits across each half of
1246  // a value, so a right shift is just a mask of the low bits:
1247  // lshr i[2N] (mul nuw X, (2^N)+1), N --> and iN X, (2^N)-1
1248  // TODO: Generalize to allow more than just half-width shifts?
1249  if (BitWidth > 2 && ShAmtC * 2 == BitWidth && (*MulC - 1).isPowerOf2() &&
1250  MulC->logBase2() == ShAmtC)
1251  return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *MulC - 2));
1252 
1253  // The one-use check is not strictly necessary, but codegen may not be
1254  // able to invert the transform and perf may suffer with an extra mul
1255  // instruction.
1256  if (Op0->hasOneUse()) {
1257  APInt NewMulC = MulC->lshr(ShAmtC);
1258  // if c is divisible by (1 << ShAmtC):
1259  // lshr (mul nuw x, MulC), ShAmtC -> mul nuw x, (MulC >> ShAmtC)
1260  if (MulC->eq(NewMulC.shl(ShAmtC))) {
1261  auto *NewMul =
1262  BinaryOperator::CreateNUWMul(X, ConstantInt::get(Ty, NewMulC));
1263  BinaryOperator *OrigMul = cast<BinaryOperator>(Op0);
1264  NewMul->setHasNoSignedWrap(OrigMul->hasNoSignedWrap());
1265  return NewMul;
1266  }
1267  }
1268  }
1269 
1270  // Try to narrow bswap.
1271  // In the case where the shift amount equals the bitwidth difference, the
1272  // shift is eliminated.
1273  if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::bswap>(
1274  m_OneUse(m_ZExt(m_Value(X))))))) {
1275  unsigned SrcWidth = X->getType()->getScalarSizeInBits();
1276  unsigned WidthDiff = BitWidth - SrcWidth;
1277  if (SrcWidth % 16 == 0) {
1278  Value *NarrowSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);
1279  if (ShAmtC >= WidthDiff) {
1280  // (bswap (zext X)) >> C --> zext (bswap X >> C')
1281  Value *NewShift = Builder.CreateLShr(NarrowSwap, ShAmtC - WidthDiff);
1282  return new ZExtInst(NewShift, Ty);
1283  } else {
1284  // (bswap (zext X)) >> C --> (zext (bswap X)) << C'
1285  Value *NewZExt = Builder.CreateZExt(NarrowSwap, Ty);
1286  Constant *ShiftDiff = ConstantInt::get(Ty, WidthDiff - ShAmtC);
1287  return BinaryOperator::CreateShl(NewZExt, ShiftDiff);
1288  }
1289  }
1290  }
1291 
1292  // If the shifted-out value is known-zero, then this is an exact shift.
1293  if (!I.isExact() &&
1294  MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmtC), 0, &I)) {
1295  I.setIsExact();
1296  return &I;
1297  }
1298  }
1299 
1300  // Transform (x << y) >> y to x & (-1 >> y)
1301  Value *X;
1302  if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))))) {
1303  Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
1304  Value *Mask = Builder.CreateLShr(AllOnes, Op1);
1305  return BinaryOperator::CreateAnd(Mask, X);
1306  }
1307 
1308  return nullptr;
1309 }
1310 
// NOTE(review): doxygen-extracted listing — line 1312, which carried the
// function's qualified name, was dropped. The call site at file line 1481
// (`foldVariableSignZeroExtensionOfVariableHighBitExtract(I)`) identifies it.
//
// Recognizes a variable-width sign/zero-extension-of-high-bits idiom:
//   (Val << (bitwidth(Val)-NBits)) a>> (bitwidth(Val)-NBits)
// where Val itself is a right-shift extracting the high NBits bits, and
// rewrites it to shift the innermost operands directly, dropping the two
// redundant outer shifts (keeping an intermediate trunc if one was present).
1311 Instruction *
1313  BinaryOperator &OldAShr) {
1314  assert(OldAShr.getOpcode() == Instruction::AShr &&
1315  "Must be called with arithmetic right-shift instruction only.");
1316 
1317  // Check that constant C is a splat of the element-wise bitwidth of V.
1318  auto BitWidthSplat = [](Constant *C, Value *V) {
1319  return match(
1320  C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
1321  APInt(C->getType()->getScalarSizeInBits(),
1322  V->getType()->getScalarSizeInBits())));
1323  };
1324 
1325  // It should look like variable-length sign-extension on the outside:
1326  // (Val << (bitwidth(Val)-Nbits)) a>> (bitwidth(Val)-Nbits)
1327  Value *NBits;
1328  Instruction *MaybeTrunc;
1329  Constant *C1, *C2;
1330  if (!match(&OldAShr,
1331  m_AShr(m_Shl(m_Instruction(MaybeTrunc),
  // NOTE(review): lines 1332 and 1334 (interior of this match pattern,
  // binding C1 and C2) were dropped by the extraction.
1333  m_ZExtOrSelf(m_Value(NBits))))),
1335  m_ZExtOrSelf(m_Deferred(NBits)))))) ||
1336  !BitWidthSplat(C1, &OldAShr) || !BitWidthSplat(C2, &OldAShr))
1337  return nullptr;
1338 
1339  // There may or may not be a truncation after outer two shifts.
1340  Instruction *HighBitExtract;
1341  match(MaybeTrunc, m_TruncOrSelf(m_Instruction(HighBitExtract)));
1342  bool HadTrunc = MaybeTrunc != HighBitExtract;
1343 
1344  // And finally, the innermost part of the pattern must be a right-shift.
1345  Value *X, *NumLowBitsToSkip;
1346  if (!match(HighBitExtract, m_Shr(m_Value(X), m_Value(NumLowBitsToSkip))))
1347  return nullptr;
1348 
1349  // Said right-shift must extract high NBits bits - C0 must be its bitwidth.
1350  Constant *C0;
1351  if (!match(NumLowBitsToSkip,
1352  m_ZExtOrSelf(
1353  m_Sub(m_Constant(C0), m_ZExtOrSelf(m_Specific(NBits))))) ||
1354  !BitWidthSplat(C0, HighBitExtract))
1355  return nullptr;
1356 
1357  // Since the NBits is identical for all shifts, if the outermost and
1358  // innermost shifts are identical, then outermost shifts are redundant.
1359  // If we had truncation, do keep it though.
1360  if (HighBitExtract->getOpcode() == OldAShr.getOpcode())
1361  return replaceInstUsesWith(OldAShr, MaybeTrunc);
1362 
1363  // Else, if there was a truncation, then we need to ensure that one
1364  // instruction will go away.
1365  if (HadTrunc && !match(&OldAShr, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
1366  return nullptr;
1367 
1368  // Finally, bypass two innermost shifts, and perform the outermost shift on
1369  // the operands of the innermost shift.
1370  Instruction *NewAShr =
1371  BinaryOperator::Create(OldAShr.getOpcode(), X, NumLowBitsToSkip);
1372  NewAShr->copyIRFlags(HighBitExtract); // We can preserve 'exact'-ness.
1373  if (!HadTrunc)
1374  return NewAShr;
1375 
1376  Builder.Insert(NewAShr);
1377  return TruncInst::CreateTruncOrBitCast(NewAShr, OldAShr.getType());
1378 }
1379 
// NOTE(review): doxygen-extracted listing; the embedded numbers are the
// original file line numbers and gaps mark lines dropped by the extraction,
// including this function's signature line (1380). From the body (it starts
// with simplifyAShrInst) this is presumably
// `Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I)` — confirm
// against upstream before relying on the name.
//
// Canonicalizes `ashr`: InstSimplify first, then generic transforms, then
// ashr-specific folds, ending with an attempted ashr->lshr conversion and a
// not-through-ashr fold.
1381  if (Value *V = simplifyAShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
1382  SQ.getWithInstruction(&I)))
1383  return replaceInstUsesWith(I, V);
1384 
1385  if (Instruction *X = foldVectorBinop(I))
1386  return X;
1387 
1388  if (Instruction *R = commonShiftTransforms(I))
1389  return R;
1390 
1391  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1392  Type *Ty = I.getType();
1393  unsigned BitWidth = Ty->getScalarSizeInBits();
1394  const APInt *ShAmtAPInt;
1395  if (match(Op1, m_APInt(ShAmtAPInt)) && ShAmtAPInt->ult(BitWidth)) {
1396  unsigned ShAmt = ShAmtAPInt->getZExtValue();
1397 
1398  // If the shift amount equals the difference in width of the destination
1399  // and source scalar types:
1400  // ashr (shl (zext X), C), C --> sext X
1401  Value *X;
1402  if (match(Op0, m_Shl(m_ZExt(m_Value(X)), m_Specific(Op1))) &&
1403  ShAmt == BitWidth - X->getType()->getScalarSizeInBits())
1404  return new SExtInst(X, Ty);
1405 
1406  // We can't handle (X << C1) >>s C2. It shifts arbitrary bits in. However,
1407  // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
1408  const APInt *ShOp1;
1409  if (match(Op0, m_NSWShl(m_Value(X), m_APInt(ShOp1))) &&
1410  ShOp1->ult(BitWidth)) {
1411  unsigned ShlAmt = ShOp1->getZExtValue();
1412  if (ShlAmt < ShAmt) {
1413  // (X <<nsw C1) >>s C2 --> X >>s (C2 - C1)
1414  Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShlAmt);
1415  auto *NewAShr = BinaryOperator::CreateAShr(X, ShiftDiff);
1416  NewAShr->setIsExact(I.isExact());
1417  return NewAShr;
1418  }
1419  if (ShlAmt > ShAmt) {
1420  // (X <<nsw C1) >>s C2 --> X <<nsw (C1 - C2)
1421  Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmt - ShAmt);
1422  auto *NewShl = BinaryOperator::Create(Instruction::Shl, X, ShiftDiff);
1423  NewShl->setHasNoSignedWrap(true);
1424  return NewShl;
1425  }
1426  }
1427 
1428  if (match(Op0, m_AShr(m_Value(X), m_APInt(ShOp1))) &&
1429  ShOp1->ult(BitWidth)) {
1430  unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
1431  // Oversized arithmetic shifts replicate the sign bit.
1432  AmtSum = std::min(AmtSum, BitWidth - 1);
1433  // (X >>s C1) >>s C2 --> X >>s (C1 + C2)
1434  return BinaryOperator::CreateAShr(X, ConstantInt::get(Ty, AmtSum));
1435  }
1436 
1437  if (match(Op0, m_OneUse(m_SExt(m_Value(X)))) &&
1438  (Ty->isVectorTy() || shouldChangeType(Ty, X->getType()))) {
1439  // ashr (sext X), C --> sext (ashr X, C')
1440  Type *SrcTy = X->getType();
1441  ShAmt = std::min(ShAmt, SrcTy->getScalarSizeInBits() - 1);
1442  Value *NewSh = Builder.CreateAShr(X, ConstantInt::get(SrcTy, ShAmt));
1443  return new SExtInst(NewSh, Ty);
1444  }
1445 
1446  if (ShAmt == BitWidth - 1) {
1447  // ashr i32 or(X,-X), 31 --> sext (X != 0)
1448  if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
1449  return new SExtInst(Builder.CreateIsNotNull(X), Ty);
1450 
1451  // ashr i32 (X -nsw Y), 31 --> sext (X < Y)
1452  Value *Y;
1453  if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
1454  return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
1455  }
1456 
1457  // If the shifted-out value is known-zero, then this is an exact shift.
1458  if (!I.isExact() &&
1459  MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {
1460  I.setIsExact();
1461  return &I;
1462  }
1463  }
1464 
1465  // Prefer `-(x & 1)` over `(x << (bitwidth(x)-1)) a>> (bitwidth(x)-1)`
1466  // as the pattern to splat the lowest bit.
1467  // FIXME: iff X is already masked, we don't need the one-use check.
1468  Value *X;
1469  if (match(Op1, m_SpecificIntAllowUndef(BitWidth - 1)) &&
1470  match(Op0, m_OneUse(m_Shl(m_Value(X),
  // NOTE(review): lines 1471 and 1474 (the tail of this match pattern and
  // the head of the merge expression assigning `Mask`) were dropped by the
  // extraction.
1472  Constant *Mask = ConstantInt::get(Ty, 1);
1473  // Retain the knowledge about the ignored lanes.
1475  Constant::mergeUndefsWith(Mask, cast<Constant>(Op1)),
1476  cast<Constant>(cast<Instruction>(Op0)->getOperand(1)));
1477  X = Builder.CreateAnd(X, Mask);
1478  return BinaryOperator::CreateNeg(X);
1479  }
1480 
1481  if (Instruction *R = foldVariableSignZeroExtensionOfVariableHighBitExtract(I))
1482  return R;
1483 
1484  // See if we can turn a signed shr into an unsigned shr.
  // NOTE(review): line 1485 (the `if (...)` guarding this return) was dropped
  // by the extraction.
1486  return BinaryOperator::CreateLShr(Op0, Op1);
1487 
1488  // ashr (xor %x, -1), %y --> xor (ashr %x, %y), -1
1489  if (match(Op0, m_OneUse(m_Not(m_Value(X))))) {
1490  // Note that we must drop 'exact'-ness of the shift!
1491  // Note that we can't keep undef's in -1 vector constant!
1492  auto *NewAShr = Builder.CreateAShr(X, Op1, Op0->getName() + ".not");
1493  return BinaryOperator::CreateNot(NewAShr);
1494  }
1495 
1496  return nullptr;
1497 }
i
i
Definition: README.txt:29
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::PatternMatch::m_TruncOrSelf
match_combine_or< CastClass_match< OpTy, Instruction::Trunc >, OpTy > m_TruncOrSelf(const OpTy &Op)
Definition: PatternMatch.h:1617
llvm::ConstantExpr::getNot
static Constant * getNot(Constant *C)
Definition: Constants.cpp:2619
llvm::MaskedValueIsZero
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if 'V & Mask' is known to be zero.
Definition: ValueTracking.cpp:391
llvm::PHINode::incoming_values
op_range incoming_values()
Definition: Instructions.h:2785
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::ConstantExpr::getZExtOrBitCast
static Constant * getZExtOrBitCast(Constant *C, Type *Ty)
Definition: Constants.cpp:1980
llvm::Value::hasOneUse
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
InstCombiner.h
llvm::CmpInst::Predicate
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:719
IntrinsicInst.h
llvm::SimplifyQuery
Definition: InstructionSimplify.h:93
canTryToConstantAddTwoShiftAmounts
bool canTryToConstantAddTwoShiftAmounts(Value *Sh0, Value *ShAmt0, Value *Sh1, Value *ShAmt1)
Definition: InstCombineShifts.cpp:23
llvm::ConstantExpr::getZExt
static Constant * getZExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2087
llvm::InstCombinerImpl::FoldShiftByConstant
Instruction * FoldShiftByConstant(Value *Op0, Constant *Op1, BinaryOperator &I)
Definition: InstCombineShifts.cpp:723
llvm::BinaryOperator::CreateNot
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", Instruction *InsertBefore=nullptr)
Definition: Instructions.cpp:2895
llvm::PatternMatch::m_LShr
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1117
llvm::simplifyAShrInst
Value * simplifyAShrInst(Value *Op0, Value *Op1, bool isExact, const SimplifyQuery &Q)
Given operands for an AShr, fold the result or return null.
Definition: InstructionSimplify.cpp:1472
C1
instcombine should handle this C2 when C1
Definition: README.txt:263
canShiftBinOpWithConstantRHS
static bool canShiftBinOpWithConstantRHS(BinaryOperator &Shift, BinaryOperator *BO)
Definition: InstCombineShifts.cpp:706
llvm::InstCombiner::Builder
BuilderTy & Builder
Definition: InstCombiner.h:58
llvm::PatternMatch::m_Add
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
Definition: PatternMatch.h:979
llvm::PatternMatch::m_CombineOr
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:218
llvm::IRBuilder< TargetFolder, IRBuilderCallbackInserter >
llvm::CastInst::Create
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Definition: Instructions.cpp:3243
llvm::Instruction::hasNoUnsignedWrap
bool hasNoUnsignedWrap() const
Determine whether the no unsigned wrap flag is set.
Definition: Instruction.cpp:161
Shift
bool Shift
Definition: README.txt:468
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
llvm::tgtok::Bits
@ Bits
Definition: TGLexer.h:50
llvm::InstCombinerImpl::reassociateShiftAmtsOfTwoSameDirectionShifts
Value * reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *Sh0, const SimplifyQuery &SQ, bool AnalyzeForSignBitExtraction=false)
Definition: InstCombineShifts.cpp:58
canEvaluateShiftedShift
static bool canEvaluateShiftedShift(unsigned OuterShAmt, bool IsOuterShl, Instruction *InnerShift, InstCombinerImpl &IC, Instruction *CxtI)
Return true if we can simplify two logical (either left or right) shifts that have constant shift amo...
Definition: InstCombineShifts.cpp:474
llvm::APInt::ugt
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition: APInt.h:1132
llvm::Instruction::setHasNoUnsignedWrap
void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
Definition: Instruction.cpp:149
llvm::PatternMatch::m_BinOp
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:84
llvm::PatternMatch::m_AShr
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1123
llvm::ComputeNumSignBits
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
Definition: ValueTracking.cpp:415
llvm::InstCombiner::addToWorklist
void addToWorklist(Instruction *I)
Definition: InstCombiner.h:366
llvm::tgtok::FalseVal
@ FalseVal
Definition: TGLexer.h:62
llvm::ARM_AM::ShiftOpc
ShiftOpc
Definition: ARMAddressingModes.h:27
llvm::CmpInst::ICMP_SLE
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:749
llvm::simplifyLShrInst
Value * simplifyLShrInst(Value *Op0, Value *Op1, bool isExact, const SimplifyQuery &Q)
Given operands for a LShr, fold the result or return null.
Definition: InstructionSimplify.cpp:1439
llvm::MipsISD::Ret
@ Ret
Definition: MipsISelLowering.h:119
llvm::APInt::lshr
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:832
RHS
Value * RHS
Definition: X86PartialReduction.cpp:76
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:459
llvm::PatternMatch::m_c_And
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
Definition: PatternMatch.h:2251
llvm::BinaryOperator::CreateNeg
NUW NUW NUW NUW Exact static Exact BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", Instruction *InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
Definition: Instructions.cpp:2855
llvm::PatternMatch::m_Not
BinaryOp_match< ValTy, cst_pred_ty< is_all_ones >, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
Definition: PatternMatch.h:2289
llvm::Instruction::setIsExact
void setIsExact(bool b=true)
Set or clear the exact flag on this instruction, which must be an operator which supports this flag.
Definition: Instruction.cpp:157
llvm::PHINode::setIncomingValue
void setIncomingValue(unsigned i, Value *V)
Definition: Instructions.h:2798
llvm::PatternMatch::m_Deferred
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:790
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::PatternMatch::m_OneUse
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::ConstantExpr::getSub
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2632
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::PatternMatch::m_APInt
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:278
llvm::PatternMatch::m_c_BinOp
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
Definition: PatternMatch.h:2215
llvm::APInt::uge
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1171
llvm::Instruction::getOpcode
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:164
llvm::APInt::isNegative
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:312
llvm::SelectInst::Create
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
Definition: Instructions.h:1768
llvm::InstCombinerImpl::foldVariableSignZeroExtensionOfVariableHighBitExtract
Instruction * foldVariableSignZeroExtensionOfVariableHighBitExtract(BinaryOperator &OldAShr)
Definition: InstCombineShifts.cpp:1312
llvm::RecurKind::And
@ And
Bitwise or logical AND of integers.
hasNoUnsignedWrap
static bool hasNoUnsignedWrap(BinaryOperator &I)
Definition: InstructionCombining.cpp:298
llvm::APInt::eq
bool eq(const APInt &RHS) const
Equality comparison.
Definition: APInt.h:1029
InstCombineInternal.h
llvm::PatternMatch::m_Select
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
Definition: PatternMatch.h:1462
llvm::PatternMatch::match
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
llvm::PHINode::getIncomingValue
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Definition: Instructions.h:2795
getScalarSizeInBits
static unsigned getScalarSizeInBits(Type *Ty)
Definition: SystemZTargetTransformInfo.cpp:403
llvm::APInt::isZero
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:359
llvm::Instruction::setHasNoSignedWrap
void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
Definition: Instruction.cpp:153
llvm::PatternMatch::m_Exact
Exact_match< T > m_Exact(const T &SubPattern)
Definition: PatternMatch.h:1351
llvm::PatternMatch::m_SpecificIntAllowUndef
specific_intval< true > m_SpecificIntAllowUndef(APInt V)
Definition: PatternMatch.h:862
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
Y
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
getOpcode
static Optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:190
llvm::APInt::getAllOnes
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:214
llvm::Type::isVectorTy
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:232
llvm::PatternMatch::m_Instruction
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
Definition: PatternMatch.h:716
llvm::PatternMatch::m_ZExt
CastClass_match< OpTy, Instruction::ZExt > m_ZExt(const OpTy &Op)
Matches ZExt.
Definition: PatternMatch.h:1629
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:548
llvm::PatternMatch::m_c_Add
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
Definition: PatternMatch.h:2237
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::Constant::getAllOnesValue
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:395
llvm::PatternMatch::m_SDiv
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1063
llvm::BinaryOperator::getOpcode
BinaryOps getOpcode() const
Definition: InstrTypes.h:392
llvm::Instruction
Definition: Instruction.h:42
llvm::Type::getScalarSizeInBits
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition: Type.cpp:189
llvm::PatternMatch::m_c_Or
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
Definition: PatternMatch.h:2258
llvm::APInt::getZExtValue
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1466
llvm::APInt::getHighBitsSet
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:279
llvm::InstCombinerImpl
Definition: InstCombineInternal.h:61
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:879
llvm::PatternMatch::m_NSWSub
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1164
PatternMatch.h
llvm::APInt::countTrailingZeros
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
Definition: APInt.h:1543
llvm::CastInst::CreateTruncOrBitCast
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
Definition: Instructions.cpp:3319
llvm::PatternMatch::m_Shift
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
Definition: PatternMatch.h:1296
llvm::PHINode::getNumIncomingValues
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: Instructions.h:2791
X
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
llvm::PatternMatch::m_One
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
Definition: PatternMatch.h:517
llvm::Instruction::isCommutative
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
Definition: Instruction.cpp:796
llvm::APInt::ashr
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition: APInt.h:808
llvm::PatternMatch::m_Power2
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
Definition: PatternMatch.h:544
llvm::PatternMatch::m_Xor
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1105
llvm::Type::getExtendedType
Type * getExtendedType() const
Given scalar/vector integer type, returns a type with elements twice as wide as in the original type.
Definition: DerivedTypes.h:706
llvm::Type::isIntegerTy
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:196
llvm::PatternMatch::m_Shr
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
Definition: PatternMatch.h:1303
llvm::PatternMatch::m_ZExtOrSelf
match_combine_or< CastClass_match< OpTy, Instruction::ZExt >, OpTy > m_ZExtOrSelf(const OpTy &Op)
Definition: PatternMatch.h:1635
llvm::ConstantExpr::getTrunc
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2059
llvm::PatternMatch::m_NUWAdd
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1189
llvm::InstCombinerImpl::visitShl
Instruction * visitShl(BinaryOperator &I)
Definition: InstCombineShifts.cpp:842
llvm::Constant
This is an important base class in LLVM.
Definition: Constant.h:41
llvm::Instruction::isLogicalShift
bool isLogicalShift() const
Return true if this is a logical shift left or a logical shift right.
Definition: Instruction.h:205
llvm::Constant::replaceUndefsWith
static Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
Definition: Constants.cpp:747
llvm::APInt::logBase2
unsigned logBase2() const
Definition: APInt.h:1652
llvm::PatternMatch::m_AllOnes
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
Definition: PatternMatch.h:453
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:54
llvm::simplifySubInst
Value * simplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
Definition: InstructionSimplify.cpp:878
I
#define I(x, y, z)
Definition: MD5.cpp:58
getShiftedValue
static Value * getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift, InstCombinerImpl &IC, const DataLayout &DL)
When canEvaluateShifted() returns true for an expression, this function inserts the new computation t...
Definition: InstCombineShifts.cpp:644
llvm::ConstantExpr::getShl
static Constant * getShl(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2663
llvm::PatternMatch::m_And
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1093
llvm::PatternMatch::m_SRem
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1081
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::PatternMatch::m_Sub
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
Definition: PatternMatch.h:991
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:853
SI
StandardInstrumentations SI(Debug, VerifyEach)
llvm::simplifyAddInst
Value * simplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Definition: InstructionSimplify.cpp:697
llvm::SelectInst
This class represents the LLVM 'select' instruction.
Definition: Instructions.h:1737
llvm::SimplifyQuery::getWithInstruction
SimplifyQuery getWithInstruction(Instruction *I) const
Definition: InstructionSimplify.h:120
llvm::PatternMatch::m_Constant
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:144
Builder
assume Assume Builder
Definition: AssumeBundleBuilder.cpp:651
llvm::PatternMatch::m_Value
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:76
llvm::ZExtInst
This class represents zero extension of integer types.
Definition: Instructions.h:4850
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:75
llvm::User::setOperand
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
getType
static M68kRelType getType(unsigned Kind, MCSymbolRefExpr::VariantKind &Modifier, bool &IsPCRel)
Definition: M68kELFObjectWriter.cpp:48
llvm::PatternMatch::m_SExt
CastClass_match< OpTy, Instruction::SExt > m_SExt(const OpTy &Op)
Matches SExt.
Definition: PatternMatch.h:1623
llvm::InstCombinerImpl::visitLShr
Instruction * visitLShr(BinaryOperator &I)
Definition: InstCombineShifts.cpp:1071
llvm::PatternMatch::m_SpecificInt
specific_intval< false > m_SpecificInt(APInt V)
Match a specific integer value or vector with all elements equal to the value.
Definition: PatternMatch.h:854
llvm::PatternMatch::m_CombineAnd
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
Definition: PatternMatch.h:224
llvm::PatternMatch::m_NUWMul
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1205
llvm::BinaryOperator
Definition: InstrTypes.h:188
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
Mul
BinaryOperator * Mul
Definition: X86PartialReduction.cpp:70
llvm::PatternMatch::m_SpecificInt_ICMP
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eg/ne/...) to Threshold.
Definition: PatternMatch.h:598
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:137
llvm::CmpInst::ICMP_ULT
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:744
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::Instruction::isExact
bool isExact() const
Determine whether the exact flag is set.
Definition: Instruction.cpp:221
CC
auto CC
Definition: RISCVRedundantCopyElimination.cpp:79
foldShiftedShift
static Value * foldShiftedShift(BinaryOperator *InnerShift, unsigned OuterShAmt, bool IsOuterShl, InstCombiner::BuilderTy &Builder)
Fold OuterShift (InnerShift X, C1), C2.
Definition: InstCombineShifts.cpp:581
llvm::APInt::ult
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition: APInt.h:1061
llvm::SExtInst
This class represents a sign extension of integer types.
Definition: Instructions.h:4889
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:308
llvm::InstCombinerImpl::visitAShr
Instruction * visitAShr(BinaryOperator &I)
Definition: InstCombineShifts.cpp:1380
llvm::MCID::Select
@ Select
Definition: MCInstrDesc.h:164
foldShiftOfShiftedLogic
static Instruction * foldShiftOfShiftedLogic(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
If we have a shift-by-constant of a bitwise logic op that itself has a shift-by-constant operand with...
Definition: InstCombineShifts.cpp:329
llvm::Constant::getNullValue
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:350
llvm::InstCombinerImpl::InsertNewInstWith
Instruction * InsertNewInstWith(Instruction *New, Instruction &Old)
Same as InsertNewInstBefore, but also sets the debug loc.
Definition: InstCombineInternal.h:395
llvm::APInt::isMinSignedValue
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition: APInt.h:408
llvm::InstCombinerImpl::commonShiftTransforms
Instruction * commonShiftTransforms(BinaryOperator &I)
Definition: InstCombineShifts.cpp:370
llvm::ConstantExpr::getAdd
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2625
llvm::APInt::isNegatedPowerOf2
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
Definition: APInt.h:434
llvm::ConstantInt::getSigned
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition: Constants.cpp:893
llvm::simplifyShlInst
Value * simplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, const SimplifyQuery &Q)
Given operands for a Shl, fold the result or return null.
Definition: InstructionSimplify.cpp:1402
llvm::BitWidth
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:147
llvm::PatternMatch::m_NSWShl
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1180
dropRedundantMaskingOfLeftShiftInput
static Instruction * dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift, const SimplifyQuery &Q, InstCombiner::BuilderTy &Builder)
Definition: InstCombineShifts.cpp:184
llvm::ConstantExpr::getLShr
static Constant * getLShr(Constant *C1, Constant *C2, bool isExact=false)
Definition: Constants.cpp:2670
llvm::CmpInst::ICMP_SGE
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:747
llvm::MCID::Add
@ Add
Definition: MCInstrDesc.h:185
llvm::Constant::mergeUndefsWith
static Constant * mergeUndefsWith(Constant *C, Constant *Other)
Merges undefs of a Constant with another Constant, along with the undefs already present.
Definition: Constants.cpp:771
llvm::Instruction::BinaryOps
BinaryOps
Definition: Instruction.h:773
llvm::APInt::getSignMask
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
Definition: APInt.h:209
llvm::PatternMatch::m_Specific
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:772
llvm::IRBuilderBase::CreateLShr
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1342
llvm::Instruction::hasNoSignedWrap
bool hasNoSignedWrap() const
Determine whether the no signed wrap flag is set.
Definition: Instruction.cpp:165
CreateMul
static BinaryOperator * CreateMul(Value *S1, Value *S2, const Twine &Name, Instruction *InsertBefore, Value *FlagsOp)
Definition: Reassociate.cpp:253
llvm::IRBuilderBase::CreateShl
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1321
canEvaluateShifted
static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift, InstCombinerImpl &IC, Instruction *CxtI)
See if we can compute the specified value, but shifted logically to the left or right by some number ...
Definition: InstCombineShifts.cpp:526
InstructionSimplify.h
llvm::PHINode
Definition: Instructions.h:2699
llvm::Instruction::copyIRFlags
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
Definition: Instruction.cpp:324
llvm::PatternMatch::m_Neg
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
Definition: PatternMatch.h:2273
llvm::APInt::getLowBitsSet
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:289
llvm::Value::takeName
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:381
llvm::APInt::shl
APInt shl(unsigned shiftAmt) const
Left-shift function.
Definition: APInt.h:854
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::PatternMatch::m_Trunc
CastClass_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
Definition: PatternMatch.h:1611
llvm::BinaryOperator::Create
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), Instruction *InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Definition: Instructions.cpp:2839
llvm::tgtok::TrueVal
@ TrueVal
Definition: TGLexer.h:62
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::PatternMatch::m_Shl
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1111
llvm::InstCombinerImpl::MaskedValueIsZero
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth=0, const Instruction *CxtI=nullptr) const
Definition: InstCombineInternal.h:487
llvm::PatternMatch::m_Mul
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1045