Go to the documentation of this file.
48 #include <type_traits>
60 #define DEBUG_TYPE "instcombine"
63 "Negator: Number of negations attempted to be sinked");
65 "Negator: Number of negations successfully sinked");
66 STATISTIC(NegatorMaxDepthVisited,
"Negator: Maximal traversal depth ever "
67 "reached while attempting to sink negation");
69 "Negator: How many times did the traversal depth limit was reached "
72 NegatorNumValuesVisited,
73 "Negator: Total number of values visited during attempts to sink negation");
74 STATISTIC(NegatorNumNegationsFoundInCache,
75 "Negator: How many negations did we retrieve/reuse from cache");
77 "Negator: Maximal number of values ever visited while attempting to "
79 STATISTIC(NegatorNumInstructionsCreatedTotal,
80 "Negator: Number of new negated instructions created, total");
82 "Negator: Maximal number of new instructions created during negation "
84 STATISTIC(NegatorNumInstructionsNegatedSuccess,
85 "Negator: Number of new negated instructions created in successful "
86 "negation sinking attempts");
89 "Controls Negator transformations in InstCombine pass");
93 cl::desc(
"Should we attempt to sink negations?"));
98 cl::desc(
"What is the maximal lookup depth when trying to "
99 "check for viability of negation sinking."));
105 ++NegatorNumInstructionsCreatedTotal;
106 NewInstructions.push_back(
I);
108 DL(DL_), AC(AC_), DT(DT_), IsTrulyNegation(IsTrulyNegation_) {}
110 #if LLVM_ENABLE_STATS
111 Negator::~Negator() {
112 NegatorMaxTotalValuesVisited.updateMax(NumValuesVisitedInThisNegator);
120 std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(
Instruction *
I) {
121 assert(
I->getNumOperands() == 2 &&
"Only for binops!");
122 std::array<Value *, 2> Ops{
I->getOperand(0),
I->getOperand(1)};
152 if (!isa<Instruction>(V))
161 auto *
I = cast<Instruction>(V);
162 unsigned BitWidth =
I->getType()->getScalarSizeInBits();
166 InstCombiner::BuilderTy::InsertPointGuard Guard(
Builder);
172 switch (
I->getOpcode()) {
174 std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(
I);
177 return Builder.CreateNot(Ops[0],
I->getName() +
".neg");
180 case Instruction::Xor:
184 I->getName() +
".neg");
186 case Instruction::AShr:
187 case Instruction::LShr: {
191 Value *BO =
I->getOpcode() == Instruction::AShr
192 ?
Builder.CreateLShr(
I->getOperand(0),
I->getOperand(1))
193 :
Builder.CreateAShr(
I->getOperand(0),
I->getOperand(1));
194 if (
auto *NewInstr = dyn_cast<Instruction>(BO)) {
195 NewInstr->copyIRFlags(
I);
196 NewInstr->setName(
I->getName() +
".neg");
206 case Instruction::SExt:
207 case Instruction::ZExt:
209 if (
I->getOperand(0)->getType()->isIntOrIntVectorTy(1))
210 return I->getOpcode() == Instruction::SExt
211 ?
Builder.CreateZExt(
I->getOperand(0),
I->getType(),
212 I->getName() +
".neg")
213 :
Builder.CreateSExt(
I->getOperand(0),
I->getType(),
214 I->getName() +
".neg");
219 auto *Sel = cast<SelectInst>(
I);
225 return Builder.CreateSelect(Sel->getCondition(), NegTrueC, NegFalseC,
226 I->getName() +
".neg",
I);
234 if (
I->getOpcode() == Instruction::Sub &&
239 return Builder.CreateSub(
I->getOperand(1),
I->getOperand(0),
240 I->getName() +
".neg");
248 switch (
I->getOpcode()) {
249 case Instruction::ZExt: {
253 unsigned SrcWidth =
SrcOp->getType()->getScalarSizeInBits();
254 const APInt &FullShift =
APInt(SrcWidth, SrcWidth - 1);
255 if (IsTrulyNegation &&
258 return Builder.CreateSExt(Ashr,
I->getType());
262 case Instruction::And: {
268 unsigned BW =
X->getType()->getScalarSizeInBits();
272 return Builder.CreateTruncOrBitCast(
R,
I->getType());
276 case Instruction::SDiv:
280 if (
auto *Op1C = dyn_cast<Constant>(
I->getOperand(1))) {
281 if (!Op1C->containsUndefOrPoisonElement() &&
282 Op1C->isNotMinSignedValue() && Op1C->isNotOneValue()) {
285 I->getName() +
".neg");
286 if (
auto *NewInstr = dyn_cast<Instruction>(BO))
287 NewInstr->setIsExact(
I->isExact());
296 LLVM_DEBUG(
dbgs() <<
"Negator: reached maximal allowed traversal depth in "
297 << *V <<
". Giving up.\n");
298 ++NegatorTimesDepthLimitReached;
302 switch (
I->getOpcode()) {
303 case Instruction::Freeze: {
305 Value *NegOp = negate(
I->getOperand(0),
Depth + 1);
308 return Builder.CreateFreeze(NegOp,
I->getName() +
".neg");
312 auto *
PHI = cast<PHINode>(
I);
314 for (
auto I :
zip(
PHI->incoming_values(), NegatedIncomingValues)) {
315 if (!(std::get<1>(
I) =
316 negate(std::get<0>(
I),
Depth + 1)))
321 PHI->getType(),
PHI->getNumOperands(),
PHI->getName() +
".neg");
322 for (
auto I :
zip(NegatedIncomingValues,
PHI->blocks()))
330 auto *NewSelect = cast<SelectInst>(
I->clone());
332 NewSelect->swapValues();
334 NewSelect->setName(
I->getName() +
".neg");
339 Value *NegOp1 = negate(
I->getOperand(1),
Depth + 1);
342 Value *NegOp2 = negate(
I->getOperand(2),
Depth + 1);
346 return Builder.CreateSelect(
I->getOperand(0), NegOp1, NegOp2,
347 I->getName() +
".neg",
I);
349 case Instruction::ShuffleVector: {
351 auto *Shuf = cast<ShuffleVectorInst>(
I);
352 Value *NegOp0 = negate(
I->getOperand(0),
Depth + 1);
355 Value *NegOp1 = negate(
I->getOperand(1),
Depth + 1);
358 return Builder.CreateShuffleVector(NegOp0, NegOp1, Shuf->getShuffleMask(),
359 I->getName() +
".neg");
361 case Instruction::ExtractElement: {
363 auto *EEI = cast<ExtractElementInst>(
I);
364 Value *NegVector = negate(EEI->getVectorOperand(),
Depth + 1);
367 return Builder.CreateExtractElement(NegVector, EEI->getIndexOperand(),
368 I->getName() +
".neg");
370 case Instruction::InsertElement: {
373 auto *IEI = cast<InsertElementInst>(
I);
374 Value *NegVector = negate(IEI->getOperand(0),
Depth + 1);
377 Value *NegNewElt = negate(IEI->getOperand(1),
Depth + 1);
380 return Builder.CreateInsertElement(NegVector, NegNewElt, IEI->getOperand(2),
381 I->getName() +
".neg");
383 case Instruction::Trunc: {
385 Value *NegOp = negate(
I->getOperand(0),
Depth + 1);
388 return Builder.CreateTrunc(NegOp,
I->getType(),
I->getName() +
".neg");
390 case Instruction::Shl: {
392 if (
Value *NegOp0 = negate(
I->getOperand(0),
Depth + 1))
393 return Builder.CreateShl(NegOp0,
I->getOperand(1),
I->getName() +
".neg");
395 auto *Op1C = dyn_cast<Constant>(
I->getOperand(1));
396 if (!Op1C || !IsTrulyNegation)
401 I->getName() +
".neg");
403 case Instruction::Or: {
407 std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(
I);
411 return Builder.CreateNot(Ops[0],
I->getName() +
".neg");
426 if (!IsTrulyNegation)
430 assert((NegatedOps.size() + NonNegatedOps.size()) == 2 &&
431 "Internal consistency check failed.");
433 if (NegatedOps.size() == 2)
434 return Builder.CreateAdd(NegatedOps[0], NegatedOps[1],
435 I->getName() +
".neg");
436 assert(IsTrulyNegation &&
"We should have early-exited then.");
438 if (NonNegatedOps.size() == 2)
441 return Builder.CreateSub(NegatedOps[0], NonNegatedOps[0],
442 I->getName() +
".neg");
444 case Instruction::Xor: {
445 std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(
I);
448 if (
auto *
C = dyn_cast<Constant>(Ops[1])) {
451 I->getName() +
".neg");
456 std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(
I);
458 Value *NegatedOp, *OtherOp;
461 if (
Value *NegOp1 = negate(Ops[1],
Depth + 1)) {
464 }
else if (
Value *NegOp0 = negate(Ops[0],
Depth + 1)) {
470 return Builder.CreateMul(NegatedOp, OtherOp,
I->getName() +
".neg");
480 NegatorMaxDepthVisited.updateMax(
Depth);
481 ++NegatorNumValuesVisited;
483 #if LLVM_ENABLE_STATS
484 ++NumValuesVisitedInThisNegator;
489 Value *Placeholder =
reinterpret_cast<Value *
>(
static_cast<uintptr_t
>(-1));
493 auto NegationsCacheIterator = NegationsCache.find(V);
494 if (NegationsCacheIterator != NegationsCache.end()) {
495 ++NegatorNumNegationsFoundInCache;
496 Value *NegatedV = NegationsCacheIterator->second;
497 assert(NegatedV != Placeholder &&
"Encountered a cycle during negation.");
505 NegationsCache[V] = Placeholder;
511 NegationsCache[V] = NegatedV;
517 Value *Negated = negate(Root, 0);
522 I->eraseFromParent();
530 ++NegatorTotalNegationsAttempted;
531 LLVM_DEBUG(
dbgs() <<
"Negator: attempting to sink negation into " << *Root
539 std::optional<Result> Res =
N.run(Root);
541 LLVM_DEBUG(
dbgs() <<
"Negator: failed to sink negation into " << *Root
546 LLVM_DEBUG(
dbgs() <<
"Negator: successfully sunk negation into " << *Root
547 <<
"\n NEW: " << *Res->second <<
"\n");
548 ++NegatorNumTreesNegated;
560 <<
" instrs to InstCombine\n");
561 NegatorMaxInstructionsCreated.updateMax(Res->first.size());
562 NegatorNumInstructionsNegatedSuccess += Res->first.size();
This is an optimization pass for GlobalISel generic memory operations.
bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if LHS and RHS have no common bits set.
match_combine_or< CastClass_match< OpTy, Instruction::Trunc >, OpTy > m_TruncOrSelf(const OpTy &Op)
static Constant * getNot(Constant *C)
A parsed version of the target data layout string in and methods for querying it.
bool hasOneUse() const
Return true if there is exactly one use of this value.
DominatorTree & getDominatorTree() const
DEBUG_COUNTER(NegatorCounter, "instcombine-negator", "Controls Negator transformations in InstCombine pass")
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
void ClearInsertionPoint()
Clear the insertion point: created instructions will not be inserted into a block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
specific_intval< true > m_SpecificIntAllowUndef(APInt V)
(vector float) vec_cmpeq(*A, *B) C
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
static Constant * getAllOnesValue(Type *Ty)
static bool shouldExecute(unsigned CounterName)
STATISTIC(NumFunctions, "Total number of functions")
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
static constexpr unsigned NegatorDefaultMaxDepth
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static cl::opt< unsigned > NegatorMaxDepth("instcombine-negator-max-depth", cl::init(NegatorDefaultMaxDepth), cl::desc("What is the maximal lookup depth when trying to " "check for viability of negation sinking."))
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
This is an important base class in LLVM.
AssumptionCache & getAssumptionCache() const
const DataLayout & getDataLayout() const
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
This is an important class for using LLVM in a threaded context.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
static Constant * getShl(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
initializer< Ty > init(const Ty &Val)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false)
Return true if the two given values are negation.
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Class for arbitrary precision integers.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
auto m_Undef()
Match an arbitrary undef constant.
A cache of @llvm.assume calls within a function.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Type * getType() const
All values are typed, get the type of this value.
LLVMContext & getContext() const
All values hold a context through their type.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
TargetFolder - Create constants with target dependent folding.
static Constant * getNeg(Constant *C, bool HasNUW=false, bool HasNSW=false)
static Value * Negate(bool LHSIsZero, Value *Root, InstCombinerImpl &IC)
Attempt to negate Root.
InstTy * Insert(InstTy *I, const Twine &Name="") const
Insert and return the specified instruction.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
constexpr unsigned BitWidth
cst_pred_ty< is_any_apint > m_AnyIntegralConstant()
Match an integer or vector with any integral constant.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
static cl::opt< bool > NegatorEnabled("instcombine-negator-enabled", cl::init(true), cl::desc("Should we attempt to sink negations?"))
auto reverse(ContainerTy &&C)
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
LLVM Value Representation.
@ Xor
Bitwise or logical XOR of integers.
reference emplace_back(ArgTypes &&... Args)