1//===- InstCombineCompares.cpp --------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the visitICmp and visitFCmp functions.
10//
11//===----------------------------------------------------------------------===//
12
13#include "InstCombineInternal.h"
14#include "llvm/ADT/APFloat.h"
15#include "llvm/ADT/APSInt.h"
16#include "llvm/ADT/SetVector.h"
17#include "llvm/ADT/Statistic.h"
22#include "llvm/Analysis/Loads.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/InstrTypes.h"
34#include <bitset>
35
36using namespace llvm;
37using namespace PatternMatch;
38
39#define DEBUG_TYPE "instcombine"
40
41// How many times is a select replaced by one of its operands?
42STATISTIC(NumSel, "Number of select opts");
43
44/// Compute Result = In1+In2, returning true if the result overflowed for this
45/// type.
46static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2,
47 bool IsSigned = false) {
48 bool Overflow;
49 if (IsSigned)
50 Result = In1.sadd_ov(In2, Overflow);
51 else
52 Result = In1.uadd_ov(In2, Overflow);
53
54 return Overflow;
55}
56
57/// Compute Result = In1-In2, returning true if the result overflowed for this
58/// type.
59static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2,
60 bool IsSigned = false) {
61 bool Overflow;
62 if (IsSigned)
63 Result = In1.ssub_ov(In2, Overflow);
64 else
65 Result = In1.usub_ov(In2, Overflow);
66
67 return Overflow;
68}
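// Illustrative usage (editorial sketch, not part of the original file): these
// helpers wrap APInt's overflow-reporting arithmetic, e.g.
//   APInt A(/*numBits=*/8, 127), B(/*numBits=*/8, 1), R(8, 0);
//   bool Ovf = addWithOverflow(R, A, B, /*IsSigned=*/true);
//   // Ovf == true and R wraps to -128: 127 + 1 overflows signed i8.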
69
70/// Given an icmp instruction, return true if any use of this comparison is a
71/// branch (used when deciding whether to canonicalize a sign-bit comparison).
72static bool hasBranchUse(ICmpInst &I) {
73 for (auto *U : I.users())
74 if (isa<BranchInst>(U))
75 return true;
76 return false;
77}
78
79/// Returns true if the exploded icmp can be expressed as a signed comparison
80/// to zero and updates the predicate accordingly.
81/// The signedness of the comparison is preserved.
82/// TODO: Refactor with decomposeBitTestICmp()?
83static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
84 if (!ICmpInst::isSigned(Pred))
85 return false;
86
87 if (C.isZero())
88 return ICmpInst::isRelational(Pred);
89
90 if (C.isOne()) {
91 if (Pred == ICmpInst::ICMP_SLT) {
92 Pred = ICmpInst::ICMP_SLE;
93 return true;
94 }
95 } else if (C.isAllOnes()) {
96 if (Pred == ICmpInst::ICMP_SGT) {
97 Pred = ICmpInst::ICMP_SGE;
98 return true;
99 }
100 }
101
102 return false;
103}
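// For example (a sketch based on the cases above): "icmp slt X, 1" has its
// predicate rewritten to ICMP_SLE so it can be handled as the sign test
// "icmp sle X, 0"; likewise "icmp sgt X, -1" becomes "icmp sge X, 0".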
104
105/// This is called when we see this pattern:
106/// cmp pred (load (gep GV, ...)), cmpcst
107/// where GV is a global variable with a constant initializer. Try to simplify
108/// this into some simple computation that does not need the load. For example
109/// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
110///
111/// If AndCst is non-null, then the loaded value is masked with that constant
112/// before doing the comparison. This handles cases like "A[i]&4 == 0".
113Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
114 LoadInst *LI, GetElementPtrInst *GEP, CmpInst &ICI, ConstantInt *AndCst) {
116 if (LI->isVolatile() || !GV || !GV->isConstant() ||
117 !GV->hasDefinitiveInitializer())
118 return nullptr;
119
120 Type *EltTy = LI->getType();
121 TypeSize EltSize = DL.getTypeStoreSize(EltTy);
122 if (EltSize.isScalable())
123 return nullptr;
124
126 if (!Expr.Index || Expr.BasePtr != GV || Expr.Offset.getBitWidth() > 64)
127 return nullptr;
128
129 Constant *Init = GV->getInitializer();
130 TypeSize GlobalSize = DL.getTypeAllocSize(Init->getType());
131
132 Value *Idx = Expr.Index;
133 const APInt &Stride = Expr.Scale;
134 const APInt &ConstOffset = Expr.Offset;
135
136 // Allow an additional context offset, but only within the stride.
137 if (!ConstOffset.ult(Stride))
138 return nullptr;
139
140 // Don't handle overlapping loads for now.
141 if (!Stride.uge(EltSize.getFixedValue()))
142 return nullptr;
143
144 // Don't blow up on huge arrays.
145 uint64_t ArrayElementCount =
146 divideCeil((GlobalSize.getFixedValue() - ConstOffset.getZExtValue()),
147 Stride.getZExtValue());
148 if (ArrayElementCount > MaxArraySizeForCombine)
149 return nullptr;
150
151 enum { Overdefined = -3, Undefined = -2 };
152
153 // Variables for our state machines.
154
155 // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
156 // "i == 47 | i == 87", where 47 is the first index the condition is true for,
157 // and 87 is the second (and last) index. FirstTrueElement is -2 when
158 // undefined, otherwise set to the first true element. SecondTrueElement is
159 // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
160 int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
161
162 // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
163 // form "i != 47 & i != 87". Same state transitions as for true elements.
164 int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
165
166 /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
167 /// define a state machine that triggers for ranges of values that the index
168 /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
169 /// This is -2 when undefined, -3 when overdefined, and otherwise the last
170 /// index in the range (inclusive). We use -2 for undefined here because we
171 /// use relative comparisons and don't want 0-1 to match -1.
172 int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
173
174 // MagicBitvector - This is a magic bitvector where we set a bit if the
175 // comparison is true for element 'i'. If there are 64 elements or less in
176 // the array, this will fully represent all the comparison results.
177 uint64_t MagicBitvector = 0;
178
179 // Scan the array and see if one of our patterns matches.
180 Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
181 APInt Offset = ConstOffset;
182 for (unsigned i = 0, e = ArrayElementCount; i != e; ++i, Offset += Stride) {
183 Constant *Elt = ConstantFoldLoadFromConst(Init, EltTy, Offset, DL);
184 if (!Elt)
185 return nullptr;
186
187 // If the element is masked, handle it.
188 if (AndCst) {
189 Elt = ConstantFoldBinaryOpOperands(Instruction::And, Elt, AndCst, DL);
190 if (!Elt)
191 return nullptr;
192 }
193
194 // Find out if the comparison would be true or false for the i'th element.
195 Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
196 CompareRHS, DL, &TLI);
197 if (!C)
198 return nullptr;
199
200 // If the result is undef for this element, ignore it.
201 if (isa<UndefValue>(C)) {
202 // Extend range state machines to cover this element in case there is an
203 // undef in the middle of the range.
204 if (TrueRangeEnd == (int)i - 1)
205 TrueRangeEnd = i;
206 if (FalseRangeEnd == (int)i - 1)
207 FalseRangeEnd = i;
208 continue;
209 }
210
211 // If we can't compute the result for any of the elements, we have to give
212 // up evaluating the entire conditional.
213 if (!isa<ConstantInt>(C))
214 return nullptr;
215
216 // Otherwise, we know if the comparison is true or false for this element,
217 // update our state machines.
218 bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();
219
220 // State machine for single/double/range index comparison.
221 if (IsTrueForElt) {
222 // Update the TrueElement state machine.
223 if (FirstTrueElement == Undefined)
224 FirstTrueElement = TrueRangeEnd = i; // First true element.
225 else {
226 // Update double-compare state machine.
227 if (SecondTrueElement == Undefined)
228 SecondTrueElement = i;
229 else
230 SecondTrueElement = Overdefined;
231
232 // Update range state machine.
233 if (TrueRangeEnd == (int)i - 1)
234 TrueRangeEnd = i;
235 else
236 TrueRangeEnd = Overdefined;
237 }
238 } else {
239 // Update the FalseElement state machine.
240 if (FirstFalseElement == Undefined)
241 FirstFalseElement = FalseRangeEnd = i; // First false element.
242 else {
243 // Update double-compare state machine.
244 if (SecondFalseElement == Undefined)
245 SecondFalseElement = i;
246 else
247 SecondFalseElement = Overdefined;
248
249 // Update range state machine.
250 if (FalseRangeEnd == (int)i - 1)
251 FalseRangeEnd = i;
252 else
253 FalseRangeEnd = Overdefined;
254 }
255 }
256
257 // If this element is in range, update our magic bitvector.
258 if (i < 64 && IsTrueForElt)
259 MagicBitvector |= 1ULL << i;
260
261 // If all of our states become overdefined, bail out early. Since the
262 // predicate is expensive, only check it every 8 elements. This is only
263 // really useful for really huge arrays.
264 if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
265 SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
266 FalseRangeEnd == Overdefined)
267 return nullptr;
268 }
269
270 // Now that we've scanned the entire array, emit our new comparison(s). We
271 // order the state machines in complexity of the generated code.
272
273 // If the inbounds keyword is not present, Idx * Stride can overflow.
274 // Let's assume that Stride is 2 and the wanted value is at offset 0.
275 // Then, there are two possible values for Idx to match offset 0:
276 // 0x00..00, 0x80..00.
277 // Emitting 'icmp eq Idx, 0' isn't correct in this case because the
278 // comparison is false if Idx was 0x80..00.
279 // We need to erase the highest countTrailingZeros(Stride) bits of Idx.
280 auto MaskIdx = [&](Value *Idx) {
281 if (!Expr.Flags.isInBounds() && Stride.countr_zero() != 0) {
282 Value *Mask = Constant::getAllOnesValue(Idx->getType());
283 Mask = Builder.CreateLShr(Mask, Stride.countr_zero());
284 Idx = Builder.CreateAnd(Idx, Mask);
285 }
286 return Idx;
287 };
288
289 // If the comparison is only true for one or two elements, emit direct
290 // comparisons.
291 if (SecondTrueElement != Overdefined) {
292 Idx = MaskIdx(Idx);
293 // None true -> false.
294 if (FirstTrueElement == Undefined)
295 return replaceInstUsesWith(ICI, Builder.getFalse());
296
297 Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
298
299 // True for one element -> 'i == 47'.
300 if (SecondTrueElement == Undefined)
301 return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);
302
303 // True for two elements -> 'i == 47 | i == 72'.
304 Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
305 Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
306 Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
307 return BinaryOperator::CreateOr(C1, C2);
308 }
309
310 // If the comparison is only false for one or two elements, emit direct
311 // comparisons.
312 if (SecondFalseElement != Overdefined) {
313 Idx = MaskIdx(Idx);
314 // None false -> true.
315 if (FirstFalseElement == Undefined)
316 return replaceInstUsesWith(ICI, Builder.getTrue());
317
318 Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
319
320 // False for one element -> 'i != 47'.
321 if (SecondFalseElement == Undefined)
322 return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);
323
324 // False for two elements -> 'i != 47 & i != 72'.
325 Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
326 Value *SecondFalseIdx =
327 ConstantInt::get(Idx->getType(), SecondFalseElement);
328 Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
329 return BinaryOperator::CreateAnd(C1, C2);
330 }
331
332 // If the comparison can be replaced with a range comparison for the elements
333 // where it is true, emit the range check.
334 if (TrueRangeEnd != Overdefined) {
335 assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");
336 Idx = MaskIdx(Idx);
337
338 // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
339 if (FirstTrueElement) {
340 Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
341 Idx = Builder.CreateAdd(Idx, Offs);
342 }
343
344 Value *End =
345 ConstantInt::get(Idx->getType(), TrueRangeEnd - FirstTrueElement + 1);
346 return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
347 }
348
349 // False range check.
350 if (FalseRangeEnd != Overdefined) {
351 assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
352 Idx = MaskIdx(Idx);
353 // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
354 if (FirstFalseElement) {
355 Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
356 Idx = Builder.CreateAdd(Idx, Offs);
357 }
358
359 Value *End =
360 ConstantInt::get(Idx->getType(), FalseRangeEnd - FirstFalseElement);
361 return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
362 }
363
364 // If a magic bitvector captures the entire comparison state
365 // of this load, replace it with computation that does:
366 // ((magic_cst >> i) & 1) != 0
367 {
368 Type *Ty = nullptr;
369
370 // Look for an appropriate type:
371 // - The type of Idx if the magic fits
372 // - The smallest fitting legal type
373 if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
374 Ty = Idx->getType();
375 else
376 Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
377
378 if (Ty) {
379 Idx = MaskIdx(Idx);
380 Value *V = Builder.CreateIntCast(Idx, Ty, false);
381 V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
382 V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
383 return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
384 }
385 }
386
387 return nullptr;
388}
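// An illustrative IR example of the range state machine above (assumed input,
// not taken from the LLVM test suite):
//   @s = constant [6 x i8] c"abbbbc"
//   %p = getelementptr inbounds [6 x i8], ptr @s, i64 0, i64 %i
//   %v = load i8, ptr %p
//   %r = icmp eq i8 %v, 98            ; 'b' is true for elements 1..4
// folds to the range check:
//   %i.off = add i64 %i, -1
//   %r = icmp ult i64 %i.off, 4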
389
390/// Returns true if we can rewrite Start as a GEP with pointer Base
391/// and some integer offset. The nodes that need to be re-written
392/// for this transformation will be added to Explored.
393static bool canRewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags &NW,
394 const DataLayout &DL,
395 SetVector<Value *> &Explored) {
396 SmallVector<Value *, 16> WorkList(1, Start);
397 Explored.insert(Base);
398
399 // The following traversal gives us an order which can be used
400 // when doing the final transformation. Since in the final
401 // transformation we create the PHI replacement instructions first,
402 // we don't have to get them in any particular order.
403 //
404 // However, for other instructions we will have to traverse the
405 // operands of an instruction first, which means that we have to
406 // do a post-order traversal.
407 while (!WorkList.empty()) {
408 SmallPtrSet<PHINode *, 8> PHIs;
409
410 while (!WorkList.empty()) {
411 if (Explored.size() >= 100)
412 return false;
413
414 Value *V = WorkList.back();
415
416 if (Explored.contains(V)) {
417 WorkList.pop_back();
418 continue;
419 }
420
421 if (!isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
422 // We've found some value that we can't explore which is different from
423 // the base. Therefore we can't do this transformation.
424 return false;
425
426 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
427 // Only allow inbounds GEPs with at most one variable offset.
428 auto IsNonConst = [](Value *V) { return !isa<ConstantInt>(V); };
429 if (!GEP->isInBounds() || count_if(GEP->indices(), IsNonConst) > 1)
430 return false;
431
432 NW = NW.intersectForOffsetAdd(GEP->getNoWrapFlags());
433 if (!Explored.contains(GEP->getOperand(0)))
434 WorkList.push_back(GEP->getOperand(0));
435 }
436
437 if (WorkList.back() == V) {
438 WorkList.pop_back();
439 // We've finished visiting this node, mark it as such.
440 Explored.insert(V);
441 }
442
443 if (auto *PN = dyn_cast<PHINode>(V)) {
444 // We cannot transform PHIs on unsplittable basic blocks.
445 if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
446 return false;
447 Explored.insert(PN);
448 PHIs.insert(PN);
449 }
450 }
451
452 // Explore the PHI nodes further.
453 for (auto *PN : PHIs)
454 for (Value *Op : PN->incoming_values())
455 if (!Explored.contains(Op))
456 WorkList.push_back(Op);
457 }
458
459 // Make sure that we can do this. Since we can't insert GEPs in a basic
460 // block before a PHI node, we can't easily do this transformation if
461 // we have PHI node users of transformed instructions.
462 for (Value *Val : Explored) {
463 for (Value *Use : Val->uses()) {
464
465 auto *PHI = dyn_cast<PHINode>(Use);
466 auto *Inst = dyn_cast<Instruction>(Val);
467
468 if (Inst == Base || Inst == PHI || !Inst || !PHI ||
469 !Explored.contains(PHI))
470 continue;
471
472 if (PHI->getParent() == Inst->getParent())
473 return false;
474 }
475 }
476 return true;
477}
478
479// Sets the appropriate insert point on Builder where we can add
480// a replacement Instruction for V (if that is possible).
481static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
482 bool Before = true) {
483 if (auto *PHI = dyn_cast<PHINode>(V)) {
484 BasicBlock *Parent = PHI->getParent();
485 Builder.SetInsertPoint(Parent, Parent->getFirstInsertionPt());
486 return;
487 }
488 if (auto *I = dyn_cast<Instruction>(V)) {
489 if (!Before)
490 I = &*std::next(I->getIterator());
491 Builder.SetInsertPoint(I);
492 return;
493 }
494 if (auto *A = dyn_cast<Argument>(V)) {
495 // Set the insertion point in the entry block.
496 BasicBlock &Entry = A->getParent()->getEntryBlock();
497 Builder.SetInsertPoint(&Entry, Entry.getFirstInsertionPt());
498 return;
499 }
500 // Otherwise, this is a constant and we don't need to set a new
501 // insertion point.
502 assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
503}
504
505/// Returns a re-written value of Start as an indexed GEP using Base as a
506/// pointer.
507static Value *rewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags NW,
508 const DataLayout &DL,
509 SetVector<Value *> &Explored,
510 InstCombiner &IC) {
511 // Perform all the substitutions. This is a bit tricky because we can
512 // have cycles in our use-def chains.
513 // 1. Create the PHI nodes without any incoming values.
514 // 2. Create all the other values.
515 // 3. Add the edges for the PHI nodes.
516 // 4. Emit GEPs to get the original pointers.
517 // 5. Remove the original instructions.
518 Type *IndexType = IntegerType::get(
519 Base->getContext(), DL.getIndexTypeSizeInBits(Start->getType()));
520
521 DenseMap<Value *, Value *> NewInsts;
522 NewInsts[Base] = ConstantInt::getNullValue(IndexType);
523
524 // Create the new PHI nodes, without adding any incoming values.
525 for (Value *Val : Explored) {
526 if (Val == Base)
527 continue;
528 // Create empty phi nodes. This avoids cyclic dependencies when creating
529 // the remaining instructions.
530 if (auto *PHI = dyn_cast<PHINode>(Val))
531 NewInsts[PHI] =
532 PHINode::Create(IndexType, PHI->getNumIncomingValues(),
533 PHI->getName() + ".idx", PHI->getIterator());
534 }
535 IRBuilder<> Builder(Base->getContext());
536
537 // Create all the other instructions.
538 for (Value *Val : Explored) {
539 if (NewInsts.contains(Val))
540 continue;
541
542 if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
543 setInsertionPoint(Builder, GEP);
544 Value *Op = NewInsts[GEP->getOperand(0)];
545 Value *OffsetV = emitGEPOffset(&Builder, DL, GEP);
546 if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
547 NewInsts[GEP] = OffsetV;
548 else
549 NewInsts[GEP] = Builder.CreateAdd(
550 Op, OffsetV, GEP->getOperand(0)->getName() + ".add",
551 /*NUW=*/NW.hasNoUnsignedWrap(),
552 /*NSW=*/NW.hasNoUnsignedSignedWrap());
553 continue;
554 }
555 if (isa<PHINode>(Val))
556 continue;
557
558 llvm_unreachable("Unexpected instruction type");
559 }
560
561 // Add the incoming values to the PHI nodes.
562 for (Value *Val : Explored) {
563 if (Val == Base)
564 continue;
565 // All the instructions have been created, we can now add edges to the
566 // phi nodes.
567 if (auto *PHI = dyn_cast<PHINode>(Val)) {
568 PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
569 for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
570 Value *NewIncoming = PHI->getIncomingValue(I);
571
572 auto It = NewInsts.find(NewIncoming);
573 if (It != NewInsts.end())
574 NewIncoming = It->second;
575
576 NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
577 }
578 }
579 }
580
581 for (Value *Val : Explored) {
582 if (Val == Base)
583 continue;
584
585 setInsertionPoint(Builder, Val, false);
586 // Create GEP for external users.
587 Value *NewVal = Builder.CreateGEP(Builder.getInt8Ty(), Base, NewInsts[Val],
588 Val->getName() + ".ptr", NW);
589 IC.replaceInstUsesWith(*cast<Instruction>(Val), NewVal);
590 // Add old instruction to worklist for DCE. We don't directly remove it
591 // here because the original compare is one of the users.
592 IC.addToWorklist(cast<Instruction>(Val));
593 }
594
595 return NewInsts[Start];
596}
597
598/// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
599/// We can look through PHIs, GEPs and casts in order to determine a common base
600/// between GEPLHS and RHS.
601static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
602 CmpPredicate Cond,
603 const DataLayout &DL,
604 InstCombiner &IC) {
605 // FIXME: Support vector of pointers.
606 if (GEPLHS->getType()->isVectorTy())
607 return nullptr;
608
609 if (!GEPLHS->hasAllConstantIndices())
610 return nullptr;
611
612 APInt Offset(DL.getIndexTypeSizeInBits(GEPLHS->getType()), 0);
613 Value *PtrBase =
614 GEPLHS->stripAndAccumulateConstantOffsets(DL, Offset,
615 /*AllowNonInbounds*/ false);
616
617 // Bail if we looked through addrspacecast.
618 if (PtrBase->getType() != GEPLHS->getType())
619 return nullptr;
620
621 // The set of nodes that will take part in this transformation.
622 SetVector<Value *> Nodes;
623 GEPNoWrapFlags NW = GEPLHS->getNoWrapFlags();
624 if (!canRewriteGEPAsOffset(RHS, PtrBase, NW, DL, Nodes))
625 return nullptr;
626
627 // We know we can re-write this as
628 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)
629 // Since we've only looked through inbounds GEPs we know that we
630 // can't have overflow on either side. We can therefore re-write
631 // this as:
632 // OFFSET1 cmp OFFSET2
633 Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, NW, DL, Nodes, IC);
634
635 // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
636 // GEP having PtrBase as the pointer base, and has returned in NewRHS the
637 // offset. Since Index is the offset of LHS to the base pointer, we will now
638 // compare the offsets instead of comparing the pointers.
639 return new ICmpInst(ICmpInst::getSignedPredicate(Cond),
640 IC.Builder.getInt(Offset), NewRHS);
641}
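// A hedged end-to-end illustration (names and constants invented):
//   %lhs = getelementptr inbounds i8, ptr %base, i64 4
//   %rhs = phi ptr [ %base, %bb0 ], [ %gep8, %bb1 ]  ; %gep8 = gep inbounds i8, %base, 8
//   %cmp = icmp eq ptr %lhs, %rhs
// is rewritten so both sides are integer offsets from %base:
//   %rhs.idx = phi i64 [ 0, %bb0 ], [ 8, %bb1 ]
//   %cmp = icmp eq i64 4, %rhs.idx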
642
643/// Fold comparisons between a GEP instruction and something else. At this point
644/// we know that the GEP is on the LHS of the comparison.
645Instruction *InstCombinerImpl::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
646 CmpPredicate Cond, Instruction &I) {
647 // Don't transform signed compares of GEPs into index compares. Even if the
648 // GEP is inbounds, the final add of the base pointer can have signed overflow
649 // and would change the result of the icmp.
650 // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
651 // the maximum signed value for the pointer type.
652 if (ICmpInst::isSigned(Cond))
653 return nullptr;
654
655 // Look through bitcasts and addrspacecasts. We do not however want to remove
656 // 0 GEPs.
657 if (!isa<GetElementPtrInst>(RHS))
658 RHS = RHS->stripPointerCasts();
659
660 auto CanFold = [Cond](GEPNoWrapFlags NW) {
661 if (ICmpInst::isEquality(Cond))
662 return true;
663
664 // Unsigned predicates can be folded if the GEPs have *any* nowrap flags.
665 assert(ICmpInst::isUnsigned(Cond));
666 return NW != GEPNoWrapFlags::none();
667 };
668
669 auto NewICmp = [Cond](GEPNoWrapFlags NW, Value *Op1, Value *Op2) {
670 if (!NW.hasNoUnsignedWrap()) {
671 // Convert the unsigned comparison to a signed one.
672 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Op1, Op2);
673 }
674
675 auto *I = new ICmpInst(Cond, Op1, Op2);
676 I->setSameSign(NW.hasNoUnsignedSignedWrap());
677 return I;
678 };
679
680 CommonPointerBase Base = CommonPointerBase::compute(GEPLHS, RHS);
681 if (Base.Ptr == RHS && CanFold(Base.LHSNW) && !Base.isExpensive()) {
682 // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
683 Type *IdxTy = DL.getIndexType(GEPLHS->getType());
684 Value *Offset =
685 EmitGEPOffsets(Base.LHSGEPs, Base.LHSNW, IdxTy, /*RewriteGEPs=*/true);
686 return NewICmp(Base.LHSNW, Offset,
687 Constant::getNullValue(Offset->getType()));
688 }
689
690 if (GEPLHS->isInBounds() && ICmpInst::isEquality(Cond) &&
691 isa<Constant>(RHS) && cast<Constant>(RHS)->isNullValue() &&
692 !NullPointerIsDefined(I.getFunction(),
693 RHS->getType()->getPointerAddressSpace())) {
694 // For most address spaces, an allocation can't be placed at null, but null
695 // itself is treated as a 0 size allocation in the in bounds rules. Thus,
696 // the only valid inbounds address derived from null, is null itself.
697 // Thus, we have four cases to consider:
698 // 1) Base == nullptr, Offset == 0 -> inbounds, null
699 // 2) Base == nullptr, Offset != 0 -> poison as the result is out of bounds
700 // 3) Base != nullptr, Offset == (-base) -> poison (crossing allocations)
701 // 4) Base != nullptr, Offset != (-base) -> nonnull (and possibly poison)
702 //
703 // (Note if we're indexing a type of size 0, that simply collapses into one
704 // of the buckets above.)
705 //
706 // In general, we're allowed to make values less poison (i.e. remove
707 // sources of full UB), so in this case, we just select between the two
708 // non-poison cases (1 and 4 above).
709 //
710 // For vectors, we apply the same reasoning on a per-lane basis.
711 auto *Base = GEPLHS->getPointerOperand();
712 if (GEPLHS->getType()->isVectorTy() && Base->getType()->isPointerTy()) {
713 auto EC = cast<VectorType>(GEPLHS->getType())->getElementCount();
714 Base = Builder.CreateVectorSplat(EC, Base);
715 }
716 return new ICmpInst(Cond, Base,
717 ConstantExpr::getPointerBitCastOrAddrSpaceCast(
718 cast<Constant>(RHS), Base->getType()));
719 } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
720 GEPNoWrapFlags NW = GEPLHS->getNoWrapFlags() & GEPRHS->getNoWrapFlags();
721
722 // If the base pointers are different, but the indices are the same, just
723 // compare the base pointer.
724 if (GEPLHS->getOperand(0) != GEPRHS->getOperand(0)) {
725 bool IndicesTheSame =
726 GEPLHS->getNumOperands() == GEPRHS->getNumOperands() &&
727 GEPLHS->getPointerOperand()->getType() ==
728 GEPRHS->getPointerOperand()->getType() &&
729 GEPLHS->getSourceElementType() == GEPRHS->getSourceElementType();
730 if (IndicesTheSame)
731 for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
732 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
733 IndicesTheSame = false;
734 break;
735 }
736
737 // If all indices are the same, just compare the base pointers.
738 Type *BaseType = GEPLHS->getOperand(0)->getType();
739 if (IndicesTheSame &&
740 CmpInst::makeCmpResultType(BaseType) == I.getType() && CanFold(NW))
741 return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));
742
743 // If we're comparing GEPs with two base pointers that only differ in type
744 // and both GEPs have only constant indices or just one use, then fold
745 // the compare with the adjusted indices.
746 // FIXME: Support vector of pointers.
747 if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
748 (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
749 (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
750 GEPLHS->getOperand(0)->stripPointerCasts() ==
751 GEPRHS->getOperand(0)->stripPointerCasts() &&
752 !GEPLHS->getType()->isVectorTy()) {
753 Value *LOffset = EmitGEPOffset(GEPLHS);
754 Value *ROffset = EmitGEPOffset(GEPRHS);
755
756 // If we looked through an addrspacecast between different sized address
757 // spaces, the LHS and RHS pointers are different sized
758 // integers. Truncate to the smaller one.
759 Type *LHSIndexTy = LOffset->getType();
760 Type *RHSIndexTy = ROffset->getType();
761 if (LHSIndexTy != RHSIndexTy) {
762 if (LHSIndexTy->getPrimitiveSizeInBits().getFixedValue() <
763 RHSIndexTy->getPrimitiveSizeInBits().getFixedValue()) {
764 ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
765 } else
766 LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
767 }
768
769 Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
770 LOffset, ROffset);
771 return replaceInstUsesWith(I, Cmp);
772 }
773 }
774
775 if (GEPLHS->getOperand(0) == GEPRHS->getOperand(0) &&
776 GEPLHS->getNumOperands() == GEPRHS->getNumOperands() &&
777 GEPLHS->getSourceElementType() == GEPRHS->getSourceElementType()) {
778 // If the GEPs only differ by one index, compare it.
779 unsigned NumDifferences = 0; // Keep track of # differences.
780 unsigned DiffOperand = 0; // The operand that differs.
781 for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
782 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
783 Type *LHSType = GEPLHS->getOperand(i)->getType();
784 Type *RHSType = GEPRHS->getOperand(i)->getType();
785 // FIXME: Better support for vector of pointers.
786 if (LHSType->getPrimitiveSizeInBits() !=
787 RHSType->getPrimitiveSizeInBits() ||
788 (GEPLHS->getType()->isVectorTy() &&
789 (!LHSType->isVectorTy() || !RHSType->isVectorTy()))) {
790 // Irreconcilable differences.
791 NumDifferences = 2;
792 break;
793 }
794
795 if (NumDifferences++)
796 break;
797 DiffOperand = i;
798 }
799
800 if (NumDifferences == 0) // SAME GEP?
801 return replaceInstUsesWith(
802 I, // No comparison is needed here.
803 ConstantInt::get(I.getType(), ICmpInst::isTrueWhenEqual(Cond)));
804 // If two GEPs only differ by an index, compare them.
805 // Note that nowrap flags are always needed when comparing two indices.
806 else if (NumDifferences == 1 && NW != GEPNoWrapFlags::none()) {
807 Value *LHSV = GEPLHS->getOperand(DiffOperand);
808 Value *RHSV = GEPRHS->getOperand(DiffOperand);
809 return NewICmp(NW, LHSV, RHSV);
810 }
811 }
812
813 if (Base.Ptr && CanFold(Base.LHSNW & Base.RHSNW) && !Base.isExpensive()) {
814 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) ---> (OFFSET1 cmp OFFSET2)
815 Type *IdxTy = DL.getIndexType(GEPLHS->getType());
816 Value *L =
817 EmitGEPOffsets(Base.LHSGEPs, Base.LHSNW, IdxTy, /*RewriteGEP=*/true);
818 Value *R =
819 EmitGEPOffsets(Base.RHSGEPs, Base.RHSNW, IdxTy, /*RewriteGEP=*/true);
820 return NewICmp(Base.LHSNW & Base.RHSNW, L, R);
821 }
822 }
823
824 // Try convert this to an indexed compare by looking through PHIs/casts as a
825 // last resort.
826 return transformToIndexedCompare(GEPLHS, RHS, Cond, DL, *this);
827}
828
829bool InstCombinerImpl::foldAllocaCmp(AllocaInst *Alloca) {
830 // It would be tempting to fold away comparisons between allocas and any
831 // pointer not based on that alloca (e.g. an argument). However, even
832 // though such pointers cannot alias, they can still compare equal.
833 //
834 // But LLVM doesn't specify where allocas get their memory, so if the alloca
835 // doesn't escape we can argue that it's impossible to guess its value, and we
836 // can therefore act as if any such guesses are wrong.
837 //
838 // However, we need to ensure that this folding is consistent: We can't fold
839 // one comparison to false, and then leave a different comparison against the
840 // same value alone (as it might evaluate to true at runtime, leading to a
841 // contradiction). As such, this code ensures that all comparisons are folded
842 // at the same time, and there are no other escapes.
843
844 struct CmpCaptureTracker : public CaptureTracker {
845 AllocaInst *Alloca;
846 bool Captured = false;
847 /// The value of the map is a bit mask of which icmp operands the alloca is
848 /// used in.
849 SmallMapVector<ICmpInst *, unsigned, 16> ICmps;
850
851 CmpCaptureTracker(AllocaInst *Alloca) : Alloca(Alloca) {}
852
853 void tooManyUses() override { Captured = true; }
854
855 Action captured(const Use *U, UseCaptureInfo CI) override {
856 // TODO(captures): Use UseCaptureInfo.
857 auto *ICmp = dyn_cast<ICmpInst>(U->getUser());
858 // We need to check that U is based *only* on the alloca, and doesn't
859 // have other contributions from a select/phi operand.
860 // TODO: We could check whether getUnderlyingObjects() reduces to one
861 // object, which would allow looking through phi nodes.
862 if (ICmp && ICmp->isEquality() && getUnderlyingObject(*U) == Alloca) {
863 // Collect equality icmps of the alloca, and don't treat them as
864 // captures.
865 ICmps[ICmp] |= 1u << U->getOperandNo();
866 return Continue;
867 }
868
869 Captured = true;
870 return Stop;
871 }
872 };
873
874 CmpCaptureTracker Tracker(Alloca);
875 PointerMayBeCaptured(Alloca, &Tracker);
876 if (Tracker.Captured)
877 return false;
878
879 bool Changed = false;
880 for (auto [ICmp, Operands] : Tracker.ICmps) {
881 switch (Operands) {
882 case 1:
883 case 2: {
884 // The alloca is only used in one icmp operand. Assume that the
885 // equality is false.
886 auto *Res = ConstantInt::get(ICmp->getType(),
887 ICmp->getPredicate() == ICmpInst::ICMP_NE);
888 replaceInstUsesWith(*ICmp, Res);
889 eraseInstFromFunction(*ICmp);
890 Changed = true;
891 break;
892 }
893 case 3:
894 // Both icmp operands are based on the alloca, so this is comparing
895 // pointer offsets, without leaking any information about the address
896 // of the alloca. Ignore such comparisons.
897 break;
898 default:
899 llvm_unreachable("Cannot happen");
900 }
901 }
902
903 return Changed;
904}
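// Illustrative consequence (sketch): for a non-escaping alloca,
//   %p = alloca i32
//   %c = icmp eq ptr %p, %q        ; %q not based on %p
// every such equality is folded at once to false (or true for ne), which is
// legal precisely because no other use can observe the alloca's address.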
905
906/// Fold "icmp pred (X+C), X".
907Instruction *InstCombinerImpl::foldICmpAddOpConst(Value *X, const APInt &C,
908 CmpPredicate Pred) {
909 // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
910 // so the values can never be equal. Similarly for all other "or equals"
911 // operators.
912 assert(!!C && "C should not be zero!");
913
914 // (X+1) <u X --> X >u (MAXUINT-1) --> X == 255
915 // (X+2) <u X --> X >u (MAXUINT-2) --> X > 253
916 // (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0
917 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
918 Constant *R =
919 ConstantInt::get(X->getType(), APInt::getMaxValue(C.getBitWidth()) - C);
920 return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
921 }
922
923 // (X+1) >u X --> X <u (0-1) --> X != 255
924 // (X+2) >u X --> X <u (0-2) --> X <u 254
925 // (X+MAXUINT) >u X --> X <u (0-MAXUINT) --> X <u 1 --> X == 0
926 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
927 return new ICmpInst(ICmpInst::ICMP_ULT, X,
928 ConstantInt::get(X->getType(), -C));
929
930 APInt SMax = APInt::getSignedMaxValue(C.getBitWidth());
931
932 // (X+ 1) <s X --> X >s (MAXSINT-1) --> X == 127
933 // (X+ 2) <s X --> X >s (MAXSINT-2) --> X >s 125
934 // (X+MAXSINT) <s X --> X >s (MAXSINT-MAXSINT) --> X >s 0
935 // (X+MINSINT) <s X --> X >s (MAXSINT-MINSINT) --> X >s -1
936 // (X+ -2) <s X --> X >s (MAXSINT- -2) --> X >s 126
937 // (X+ -1) <s X --> X >s (MAXSINT- -1) --> X != 127
938 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
939 return new ICmpInst(ICmpInst::ICMP_SGT, X,
940 ConstantInt::get(X->getType(), SMax - C));
941
942 // (X+ 1) >s X --> X <s (MAXSINT-(1-1)) --> X != 127
943 // (X+ 2) >s X --> X <s (MAXSINT-(2-1)) --> X <s 126
944 // (X+MAXSINT) >s X --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
945 // (X+MINSINT) >s X --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
946 // (X+ -2) >s X --> X <s (MAXSINT-(-2-1)) --> X <s -126
947 // (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128
948
949 assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
950 return new ICmpInst(ICmpInst::ICMP_SLT, X,
951 ConstantInt::get(X->getType(), SMax - (C - 1)));
952}
953
954/// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
955/// (icmp eq/ne A, Log2(AP2/AP1)) ->
956/// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
957Instruction *InstCombinerImpl::foldICmpShrConstConst(ICmpInst &I, Value *A,
958 const APInt &AP1,
959 const APInt &AP2) {
960 assert(I.isEquality() && "Cannot fold icmp gt/lt");
961
962 auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
963 if (I.getPredicate() == I.ICMP_NE)
964 Pred = CmpInst::getInversePredicate(Pred);
965 return new ICmpInst(Pred, LHS, RHS);
966 };
967
968 // Don't bother doing any work for cases which InstSimplify handles.
969 if (AP2.isZero())
970 return nullptr;
971
972 bool IsAShr = isa<AShrOperator>(I.getOperand(0));
973 if (IsAShr) {
974 if (AP2.isAllOnes())
975 return nullptr;
976 if (AP2.isNegative() != AP1.isNegative())
977 return nullptr;
978 if (AP2.sgt(AP1))
979 return nullptr;
980 }
981
982 if (!AP1)
983 // 'A' must be large enough to shift out the highest set bit.
984 return getICmp(I.ICMP_UGT, A,
985 ConstantInt::get(A->getType(), AP2.logBase2()));
986
987 if (AP1 == AP2)
988 return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
989
990 int Shift;
991 if (IsAShr && AP1.isNegative())
992 Shift = AP1.countl_one() - AP2.countl_one();
993 else
994 Shift = AP1.countl_zero() - AP2.countl_zero();
995
996 if (Shift > 0) {
997 if (IsAShr && AP1 == AP2.ashr(Shift)) {
998 // There are multiple solutions if we are comparing against -1 and the LHS
999 // of the ashr is not a power of two.
1000 if (AP1.isAllOnes() && !AP2.isPowerOf2())
1001 return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
1002 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1003 } else if (AP1 == AP2.lshr(Shift)) {
1004 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1005 }
1006 }
1007
1008 // Shifting const2 will never be equal to const1.
1009 // FIXME: This should always be handled by InstSimplify?
1010 auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1011 return replaceInstUsesWith(I, TorF);
1012}
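// Worked instance (illustrative): "icmp eq i8 (lshr i8 32, %A), 4".
// Shift = countl_zero(4) - countl_zero(32) = 5 - 2 = 3, and lshr(32, 3) == 4,
// so the compare folds to "icmp eq i8 %A, 3".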
1013
1014/// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
1015/// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
1016Instruction *InstCombinerImpl::foldICmpShlConstConst(ICmpInst &I, Value *A,
1017 const APInt &AP1,
1018 const APInt &AP2) {
1019 assert(I.isEquality() && "Cannot fold icmp gt/lt");
1020
1021 auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
1022 if (I.getPredicate() == I.ICMP_NE)
1023 Pred = CmpInst::getInversePredicate(Pred);
1024 return new ICmpInst(Pred, LHS, RHS);
1025 };
1026
1027 // Don't bother doing any work for cases which InstSimplify handles.
1028 if (AP2.isZero())
1029 return nullptr;
1030
1031 unsigned AP2TrailingZeros = AP2.countr_zero();
1032
1033 if (!AP1 && AP2TrailingZeros != 0)
1034 return getICmp(
1035 I.ICMP_UGE, A,
1036 ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));
1037
1038 if (AP1 == AP2)
1039 return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
1040
1041 // Get the distance between the lowest bits that are set.
1042 int Shift = AP1.countr_zero() - AP2TrailingZeros;
1043
1044 if (Shift > 0 && AP2.shl(Shift) == AP1)
1045 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1046
1047 // Shifting const2 will never be equal to const1.
1048 // FIXME: This should always be handled by InstSimplify?
1049 auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1050 return replaceInstUsesWith(I, TorF);
1051}
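// Worked instance (illustrative): "icmp eq i8 (shl i8 1, %A), 8".
// Shift = countr_zero(8) - countr_zero(1) = 3 - 0 = 3, and shl(1, 3) == 8,
// so the compare folds to "icmp eq i8 %A, 3".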
1052
1053/// The caller has matched a pattern of the form:
1054/// I = icmp ugt (add (add A, B), CI2), CI1
1055/// If this is of the form:
1056/// sum = a + b
1057/// if (sum+128 >u 255)
1058/// Then replace it with llvm.sadd.with.overflow.i8.
1059///
1060static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
1061 ConstantInt *CI2, ConstantInt *CI1,
1062 InstCombinerImpl &IC) {
1063 // The transformation we're trying to do here is to transform this into an
1064 // llvm.sadd.with.overflow. To do this, we have to replace the original add
1065 // with a narrower add, and discard the add-with-constant that is part of the
1066 // range check (if we can't eliminate it, this isn't profitable).
1067
1068 // In order to eliminate the add-with-constant, the compare can be its only
1069 // use.
1070 Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
1071 if (!AddWithCst->hasOneUse())
1072 return nullptr;
1073
1074 // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
1075 if (!CI2->getValue().isPowerOf2())
1076 return nullptr;
1077 unsigned NewWidth = CI2->getValue().countr_zero();
1078 if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
1079 return nullptr;
1080
1081 // The width of the new add formed is 1 more than the bias.
1082 ++NewWidth;
1083
1084 // Check to see that CI1 is an all-ones value with NewWidth bits.
1085 if (CI1->getBitWidth() == NewWidth ||
1086 CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
1087 return nullptr;
1088
1089 // This is only really a signed overflow check if the inputs have been
1090 // sign-extended; check for that condition. For example, if CI2 is 2^31 and
1091 // the operands of the add are 64 bits wide, we need at least 33 sign bits.
1092 if (IC.ComputeMaxSignificantBits(A, &I) > NewWidth ||
1093 IC.ComputeMaxSignificantBits(B, &I) > NewWidth)
1094 return nullptr;
1095
1096 // In order to replace the original add with a narrower
1097 // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
1098 // and truncates that discard the high bits of the add. Verify that this is
1099 // the case.
1100 Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
1101 for (User *U : OrigAdd->users()) {
1102 if (U == AddWithCst)
1103 continue;
1104
1105 // Only accept truncates for now. We would really like a nice recursive
1106 // predicate like SimplifyDemandedBits, but which goes downwards the use-def
1107 // chain to see which bits of a value are actually demanded. If the
1108 // original add had another add which was then immediately truncated, we
1109 // could still do the transformation.
1110 TruncInst *TI = dyn_cast<TruncInst>(U);
1111 if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
1112 return nullptr;
1113 }
1114
1115 // If the pattern matches, truncate the inputs to the narrower type and
1116 // use the sadd_with_overflow intrinsic to efficiently compute both the
1117 // result and the overflow bit.
1118 Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
1119 Function *F = Intrinsic::getOrInsertDeclaration(
1120 I.getModule(), Intrinsic::sadd_with_overflow, NewType);
1121
1122 InstCombiner::BuilderTy &Builder = IC.Builder;
1123
1124 // Put the new code above the original add, in case there are any uses of the
1125 // add between the add and the compare.
1126 Builder.SetInsertPoint(OrigAdd);
1127
1128 Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
1129 Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
1130 CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
1131 Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
1132 Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());
1133
1134 // The inner add was the result of the narrow add, zero extended to the
1135 // wider type. Replace it with the result computed by the intrinsic.
1136 IC.replaceInstUsesWith(*OrigAdd, ZExt);
1137 IC.eraseInstFromFunction(*OrigAdd);
1138
1139 // The original icmp gets replaced with the overflow value.
1140 return ExtractValueInst::Create(Call, 1, "sadd.overflow");
1141}
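// Sketch of the end-to-end rewrite this enables (illustrative IR; assumes %a
// and %b were sign-extended from i8, so they have at least 25 sign bits):
//   %sum = add i32 %a, %b
//   %chk = add i32 %sum, 128
//   %cmp = icmp ugt i32 %chk, 255
// becomes
//   %a.trunc = trunc i32 %a to i8
//   %b.trunc = trunc i32 %b to i8
//   %sadd = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %a.trunc, i8 %b.trunc)
//   %cmp = extractvalue { i8, i1 } %sadd, 1
// with the original wide add replaced by a zext of the narrow result.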
1142
1143/// If we have:
1144/// icmp eq/ne (urem/srem %x, %y), 0
1145/// iff %y is a power-of-two, we can replace this with a bit test:
1146/// icmp eq/ne (and %x, (add %y, -1)), 0
1147Instruction *InstCombinerImpl::foldIRemByPowerOfTwoToBitTest(ICmpInst &I) {
1148 // This fold is only valid for equality predicates.
1149 if (!I.isEquality())
1150 return nullptr;
1151 CmpPredicate Pred;
1152 Value *X, *Y, *Zero;
1153 if (!match(&I, m_ICmp(Pred, m_OneUse(m_IRem(m_Value(X), m_Value(Y))),
1154 m_CombineAnd(m_Zero(), m_Value(Zero)))))
1155 return nullptr;
1156 if (!isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, &I))
1157 return nullptr;
1158 // This may increase the instruction count, since we don't enforce that Y is a constant.
1159 Value *Mask = Builder.CreateAdd(Y, Constant::getAllOnesValue(Y->getType()));
1160 Value *Masked = Builder.CreateAnd(X, Mask);
1161 return ICmpInst::Create(Instruction::ICmp, Pred, Masked, Zero);
1162}
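// Illustrative IR (assumed %y is a known power of two, e.g. 1 << %n):
//   %r = urem i32 %x, %y
//   %c = icmp eq i32 %r, 0
// becomes the bit test
//   %m = add i32 %y, -1
//   %t = and i32 %x, %m
//   %c = icmp eq i32 %t, 0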
1163
1164/// Fold equality-comparison between zero and any (maybe truncated) right-shift
1165/// by one-less-than-bitwidth into a sign test on the original value.
1166Instruction *InstCombinerImpl::foldSignBitTest(ICmpInst &I) {
1167 Instruction *Val;
1168 CmpPredicate Pred;
1169 if (!I.isEquality() || !match(&I, m_ICmp(Pred, m_Instruction(Val), m_Zero())))
1170 return nullptr;
1171
1172 Value *X;
1173 Type *XTy;
1174
1175 Constant *C;
1176 if (match(Val, m_TruncOrSelf(m_Shr(m_Value(X), m_Constant(C))))) {
1177 XTy = X->getType();
1178 unsigned XBitWidth = XTy->getScalarSizeInBits();
1179 if (!match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
1180 APInt(XBitWidth, XBitWidth - 1))))
1181 return nullptr;
1182 } else if (isa<BinaryOperator>(Val) &&
1183 (X = reassociateShiftAmtsOfTwoSameDirectionShifts(
1184 cast<BinaryOperator>(Val), SQ.getWithInstruction(Val),
1185 /*AnalyzeForSignBitExtraction=*/true))) {
1186 XTy = X->getType();
1187 } else
1188 return nullptr;
1189
1190 return ICmpInst::Create(Instruction::ICmp,
1191 Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SGE
1192 : ICmpInst::ICMP_SLT,
1193 X, ConstantInt::getNullValue(XTy));
1194}
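// For example (sketch): "icmp eq i32 (lshr i32 %x, 31), 0" tests that the sign
// bit of %x is clear, so it becomes "icmp sge i32 %x, 0"; with ne it becomes
// "icmp slt i32 %x, 0".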
1195
1196// Handle icmp pred X, 0
1197Instruction *InstCombinerImpl::foldICmpWithZero(ICmpInst &Cmp) {
1198 CmpInst::Predicate Pred = Cmp.getPredicate();
1199 if (!match(Cmp.getOperand(1), m_Zero()))
1200 return nullptr;
1201
1202 // (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0)
1203 if (Pred == ICmpInst::ICMP_SGT) {
1204 Value *A, *B;
1205 if (match(Cmp.getOperand(0), m_SMin(m_Value(A), m_Value(B)))) {
1206 if (isKnownPositive(A, SQ.getWithInstruction(&Cmp)))
1207 return new ICmpInst(Pred, B, Cmp.getOperand(1));
1208 if (isKnownPositive(B, SQ.getWithInstruction(&Cmp)))
1209 return new ICmpInst(Pred, A, Cmp.getOperand(1));
1210 }
1211 }
1212
1213 if (Instruction *New = foldIRemByPowerOfTwoToBitTest(Cmp))
1214 return New;
1215
1216 // Given:
1217 // icmp eq/ne (urem %x, %y), 0
1218 // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem':
1219 // icmp eq/ne %x, 0
1220 Value *X, *Y;
1221 if (match(Cmp.getOperand(0), m_URem(m_Value(X), m_Value(Y))) &&
1222 ICmpInst::isEquality(Pred)) {
1223 KnownBits XKnown = computeKnownBits(X, &Cmp);
1224 KnownBits YKnown = computeKnownBits(Y, &Cmp);
1225 if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
1226 return new ICmpInst(Pred, X, Cmp.getOperand(1));
1227 }
1228
1229 // (icmp eq/ne (mul X Y)) -> (icmp eq/ne X/Y) if we know whether X/Y is
1230 // odd, is non-zero, or that the multiply does not overflow.
1231 if (match(Cmp.getOperand(0), m_Mul(m_Value(X), m_Value(Y))) &&
1232 ICmpInst::isEquality(Pred)) {
1233
1234 KnownBits XKnown = computeKnownBits(X, &Cmp);
1235 // if X % 2 != 0
1236 // (icmp eq/ne Y)
1237 if (XKnown.countMaxTrailingZeros() == 0)
1238 return new ICmpInst(Pred, Y, Cmp.getOperand(1));
1239
1240 KnownBits YKnown = computeKnownBits(Y, &Cmp);
1241 // if Y % 2 != 0
1242 // (icmp eq/ne X)
1243 if (YKnown.countMaxTrailingZeros() == 0)
1244 return new ICmpInst(Pred, X, Cmp.getOperand(1));
1245
1246 auto *BO0 = cast<OverflowingBinaryOperator>(Cmp.getOperand(0));
1247 if (BO0->hasNoUnsignedWrap() || BO0->hasNoSignedWrap()) {
1248 const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
1249 // `isKnownNonZero` does more analysis than just `!KnownBits.One.isZero()`
1250 // but to avoid unnecessary work, first just check if this is an obvious case.
1251
1252 // if X non-zero and NoOverflow(X * Y)
1253 // (icmp eq/ne Y)
1254 if (!XKnown.One.isZero() || isKnownNonZero(X, Q))
1255 return new ICmpInst(Pred, Y, Cmp.getOperand(1));
1256
1257 // if Y non-zero and NoOverflow(X * Y)
1258 // (icmp eq/ne X)
1259 if (!YKnown.One.isZero() || isKnownNonZero(Y, Q))
1260 return new ICmpInst(Pred, X, Cmp.getOperand(1));
1261 }
1262 // Note, we are skipping cases:
1263 // if Y % 2 != 0 AND X % 2 != 0
1264 // (false/true)
1265 // if X non-zero and Y non-zero and NoOverflow(X * Y)
1266 // (false/true)
1267 // Those can be simplified later as we would have already replaced the (icmp
1268 // eq/ne (mul X, Y)) with (icmp eq/ne X/Y) and if X/Y is known non-zero that
1269 // will fold to a constant elsewhere.
1270 }
1271
1272 // (icmp eq/ne f(X), 0) -> (icmp eq/ne X, 0)
1273 // where f(X) == 0 if and only if X == 0
1274 if (ICmpInst::isEquality(Pred))
1275 if (Value *Stripped = stripNullTest(Cmp.getOperand(0)))
1276 return new ICmpInst(Pred, Stripped,
1277 Constant::getNullValue(Stripped->getType()));
1278
1279 return nullptr;
1280}
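// For instance (sketch of the smin fold above): if %a is known positive, then
//   %m = call i32 @llvm.smin.i32(i32 %a, i32 %b)
//   %c = icmp sgt i32 %m, 0
// reduces to
//   %c = icmp sgt i32 %b, 0
// since smin(%a, %b) > 0 iff both operands are positive.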
1281
1282/// Fold icmp eq (num + mask) & ~mask, num
1283/// to
1284/// icmp eq (and num, mask), 0
1285/// Where mask is a low bit mask.
1287 Value *Num;
1288 CmpPredicate Pred;
1289 const APInt *Mask, *Neg;
1290
1291 if (!match(&Cmp,
1292 m_c_ICmp(Pred, m_Value(Num),
1293 m_OneUse(m_And(m_OneUse(m_c_Add(m_Deferred(Num),
1294 m_LowBitMask(Mask))),
1295 m_APInt(Neg))))))
1296 return nullptr;
1297
1298 if (*Neg != ~*Mask)
1299 return nullptr;
1300
1301 if (!ICmpInst::isEquality(Pred))
1302 return nullptr;
1303
1304 // Create new icmp eq (num & mask), 0
1305 auto *NewAnd = Builder.CreateAnd(Num, *Mask);
1306 auto *Zero = Constant::getNullValue(Num->getType());
1307
1308 return new ICmpInst(Pred, NewAnd, Zero);
1309}
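// Illustrative instance (alignment-style check, names invented): with mask = 7
// and neg = -8 == ~7,
//   %t = add i32 %num, 7
//   %a = and i32 %t, -8
//   %c = icmp eq i32 %a, %num
// becomes
//   %lo = and i32 %num, 7
//   %c  = icmp eq i32 %lo, 0
// i.e. rounding %num up to a multiple of 8 leaves it unchanged iff its low
// three bits are zero.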
1310
1311/// Fold icmp Pred X, C.
1312/// TODO: This code structure does not make sense. The saturating add fold
1313/// should be moved to some other helper and extended as noted below (it is also
1314/// possible that code has been made unnecessary - do we canonicalize IR to
1315/// overflow/saturating intrinsics or not?).
1316Instruction *InstCombinerImpl::foldICmpWithConstant(ICmpInst &Cmp) {
1317 // Match the following pattern, which is a common idiom when writing
1318 // overflow-safe integer arithmetic functions. The source performs an addition
1319 // in a wider type and explicitly checks for overflow using comparisons against
1320 // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic.
1321 //
1322 // TODO: This could probably be generalized to handle other overflow-safe
1323 // operations if we worked out the formulas to compute the appropriate magic
1324 // constants.
1325 //
1326 // sum = a + b
1327 // if (sum+128 >u 255) ... -> llvm.sadd.with.overflow.i8
1328 CmpInst::Predicate Pred = Cmp.getPredicate();
1329 Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
1330 Value *A, *B;
1331 ConstantInt *CI, *CI2; // I = icmp ugt (add (add A, B), CI2), CI
1332 if (Pred == ICmpInst::ICMP_UGT && match(Op1, m_ConstantInt(CI)) &&
1333 match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
1334 if (Instruction *Res = processUGT_ADDCST_ADD(Cmp, A, B, CI2, CI, *this))
1335 return Res;
1336
1337 // icmp(phi(C1, C2, ...), C) -> phi(icmp(C1, C), icmp(C2, C), ...).
1339 if (!C)
1340 return nullptr;
1341
1342 if (auto *Phi = dyn_cast<PHINode>(Op0))
1343 if (all_of(Phi->operands(), IsaPred<Constant>)) {
1344 SmallVector<Constant *> Ops;
1345 for (Value *V : Phi->incoming_values()) {
1346 Constant *Res =
1347 ConstantFoldCompareInstOperands(Pred, cast<Constant>(V), C, DL);
1348 if (!Res)
1349 return nullptr;
1350 Ops.push_back(Res);
1351 }
1352 Builder.SetInsertPoint(Phi);
1353 PHINode *NewPhi = Builder.CreatePHI(Cmp.getType(), Phi->getNumOperands());
1354 for (auto [V, Pred] : zip(Ops, Phi->blocks()))
1355 NewPhi->addIncoming(V, Pred);
1356 return replaceInstUsesWith(Cmp, NewPhi);
1357 }
1358
1359 if (Instruction *R = tryFoldInstWithCtpopWithNot(&Cmp))
1360 return R;
1361
1362 return nullptr;
1363}
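// E.g. (sketch of the phi fold above):
//   %p = phi i32 [ 2, %a ], [ 7, %b ]
//   %c = icmp ult i32 %p, 5
// becomes
//   %c = phi i1 [ true, %a ], [ false, %b ]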
1364
1365/// Canonicalize icmp instructions based on dominating conditions.
1366Instruction *InstCombinerImpl::foldICmpWithDominatingICmp(ICmpInst &Cmp) {
1367 // We already checked simple implication in InstSimplify, only handle complex
1368 // cases here.
1369 Value *X = Cmp.getOperand(0), *Y = Cmp.getOperand(1);
1370 const APInt *C;
1371 if (!match(Y, m_APInt(C)))
1372 return nullptr;
1373
1374 CmpInst::Predicate Pred = Cmp.getPredicate();
1375 ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *C);
1376
1377 auto handleDomCond = [&](ICmpInst::Predicate DomPred,
1378 const APInt *DomC) -> Instruction * {
1379 // We have 2 compares of a variable with constants. Calculate the constant
1380 // ranges of those compares to see if we can transform the 2nd compare:
1381 // DomBB:
1382 // DomCond = icmp DomPred X, DomC
1383 // br DomCond, CmpBB, FalseBB
1384 // CmpBB:
1385 // Cmp = icmp Pred X, C
1386 ConstantRange DominatingCR =
1387 ConstantRange::makeExactICmpRegion(DomPred, *DomC);
1388 ConstantRange Intersection = DominatingCR.intersectWith(CR);
1389 ConstantRange Difference = DominatingCR.difference(CR);
1390 if (Intersection.isEmptySet())
1391 return replaceInstUsesWith(Cmp, Builder.getFalse());
1392 if (Difference.isEmptySet())
1393 return replaceInstUsesWith(Cmp, Builder.getTrue());
1394
1395 // Canonicalizing a sign-bit comparison that gets used in a branch
1396 // pessimizes codegen by generating a branch-on-zero instruction instead
1397 // of a test-and-branch. So we avoid canonicalizing in such situations
1398 // because a test-and-branch instruction has better branch displacement
1399 // than a compare-and-branch instruction.
1400 bool UnusedBit;
1401 bool IsSignBit = isSignBitCheck(Pred, *C, UnusedBit);
1402 if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp)))
1403 return nullptr;
1404
1405 // Avoid an infinite loop with min/max canonicalization.
1406 // TODO: This will be unnecessary if we canonicalize to min/max intrinsics.
1407 if (Cmp.hasOneUse() &&
1408 match(Cmp.user_back(), m_MaxOrMin(m_Value(), m_Value())))
1409 return nullptr;
1410
1411 if (const APInt *EqC = Intersection.getSingleElement())
1412 return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*EqC));
1413 if (const APInt *NeC = Difference.getSingleElement())
1414 return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*NeC));
1415 return nullptr;
1416 };
1417
1418 for (BranchInst *BI : DC.conditionsFor(X)) {
1419 CmpPredicate DomPred;
1420 const APInt *DomC;
1421 if (!match(BI->getCondition(),
1422 m_ICmp(DomPred, m_Specific(X), m_APInt(DomC))))
1423 continue;
1424
1425 BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0));
1426 if (DT.dominates(Edge0, Cmp.getParent())) {
1427 if (auto *V = handleDomCond(DomPred, DomC))
1428 return V;
1429 } else {
1430 BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1));
1431 if (DT.dominates(Edge1, Cmp.getParent()))
1432 if (auto *V =
1433 handleDomCond(CmpInst::getInversePredicate(DomPred), DomC))
1434 return V;
1435 }
1436 }
1437
1438 return nullptr;
1439}
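// Worked example (illustrative): with a dominating branch on
//   %dom = icmp ult i32 %x, 10
// taken into this block, the compare "icmp ugt i32 %x, 8" has
// DominatingCR = [0, 10) and CR = [9, UINT_MAX], whose intersection is the
// single element {9}, so it folds to "icmp eq i32 %x, 9".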
1440
1441/// Fold icmp (trunc X), C.
1442Instruction *InstCombinerImpl::foldICmpTruncConstant(ICmpInst &Cmp,
1443 TruncInst *Trunc,
1444 const APInt &C) {
1445 ICmpInst::Predicate Pred = Cmp.getPredicate();
1446 Value *X = Trunc->getOperand(0);
1447 Type *SrcTy = X->getType();
1448 unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
1449 SrcBits = SrcTy->getScalarSizeInBits();
1450
1451 // Match (icmp pred (trunc nuw/nsw X), C)
1452 // Which we can convert to (icmp pred X, (sext/zext C))
1453 if (shouldChangeType(Trunc->getType(), SrcTy)) {
1454 if (Trunc->hasNoSignedWrap())
1455 return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, C.sext(SrcBits)));
1456 if (!Cmp.isSigned() && Trunc->hasNoUnsignedWrap())
1457 return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, C.zext(SrcBits)));
1458 }
1459
1460 if (C.isOne() && C.getBitWidth() > 1) {
1461 // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
1462 Value *V = nullptr;
1463 if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
1464 return new ICmpInst(ICmpInst::ICMP_SLT, V,
1465 ConstantInt::get(V->getType(), 1));
1466 }
1467
1468 // TODO: Handle non-equality predicates.
1469 Value *Y;
1470 const APInt *Pow2;
1471 if (Cmp.isEquality() && match(X, m_Shl(m_Power2(Pow2), m_Value(Y))) &&
1472 DstBits > Pow2->logBase2()) {
1473 // (trunc (Pow2 << Y) to iN) == 0 --> Y u>= N - log2(Pow2)
1474 // (trunc (Pow2 << Y) to iN) != 0 --> Y u< N - log2(Pow2)
1475 // iff N > log2(Pow2)
1476 if (C.isZero()) {
1477 auto NewPred = (Pred == Cmp.ICMP_EQ) ? Cmp.ICMP_UGE : Cmp.ICMP_ULT;
1478 return new ICmpInst(NewPred, Y,
1479 ConstantInt::get(SrcTy, DstBits - Pow2->logBase2()));
1480 }
1481 // (trunc (Pow2 << Y) to iN) == 2**C --> Y == C - log2(Pow2)
1482 // (trunc (Pow2 << Y) to iN) != 2**C --> Y != C - log2(Pow2)
1483 if (C.isPowerOf2())
1484 return new ICmpInst(
1485 Pred, Y, ConstantInt::get(SrcTy, C.logBase2() - Pow2->logBase2()));
1486 }
1487
1488 if (Cmp.isEquality() && (Trunc->hasOneUse() || Trunc->hasNoUnsignedWrap())) {
1489 // Canonicalize to a mask and wider compare if the wide type is suitable:
1490 // (trunc X to i8) == C --> (X & 0xff) == (zext C)
1491 if (!SrcTy->isVectorTy() && shouldChangeType(DstBits, SrcBits)) {
1492 Constant *Mask =
1493 ConstantInt::get(SrcTy, APInt::getLowBitsSet(SrcBits, DstBits));
1494 Value *And = Trunc->hasNoUnsignedWrap() ? X : Builder.CreateAnd(X, Mask);
1495 Constant *WideC = ConstantInt::get(SrcTy, C.zext(SrcBits));
1496 return new ICmpInst(Pred, And, WideC);
1497 }
1498
1499 // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
1500 // of the high bits truncated out of x are known.
1501 KnownBits Known = computeKnownBits(X, &Cmp);
1502
1503 // If all the high bits are known, we can do this xform.
1504 if ((Known.Zero | Known.One).countl_one() >= SrcBits - DstBits) {
1505 // Pull in the high bits from known-ones set.
1506 APInt NewRHS = C.zext(SrcBits);
1507 NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
1508 return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, NewRHS));
1509 }
1510 }
1511
1512 // Look through truncated right-shift of the sign-bit for a sign-bit check:
1513 // trunc iN (ShOp >> ShAmtC) to i[N - ShAmtC] < 0 --> ShOp < 0
1514 // trunc iN (ShOp >> ShAmtC) to i[N - ShAmtC] > -1 --> ShOp > -1
1515 Value *ShOp;
1516 uint64_t ShAmt;
1517 bool TrueIfSigned;
1518 if (isSignBitCheck(Pred, C, TrueIfSigned) &&
1519 match(X, m_Shr(m_Value(ShOp), m_ConstantInt(ShAmt))) &&
1520 DstBits == SrcBits - ShAmt) {
1521 return TrueIfSigned ? new ICmpInst(ICmpInst::ICMP_SLT, ShOp,
1522 ConstantInt::getNullValue(SrcTy))
1523 : new ICmpInst(ICmpInst::ICMP_SGT, ShOp,
1524 ConstantInt::getAllOnesValue(SrcTy));
1525 }
1526
1527 return nullptr;
1528}
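// A small example of the mask-and-widen canonicalization above (sketch):
//   %t = trunc i32 %x to i8
//   %c = icmp eq i8 %t, 42
// becomes, when i32 is a desirable type,
//   %m = and i32 %x, 255
//   %c = icmp eq i32 %m, 42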
1529
1530/// Fold icmp (trunc nuw/nsw X), (trunc nuw/nsw Y).
1531/// Fold icmp (trunc nuw/nsw X), (zext/sext Y).
1532Instruction *
1533InstCombinerImpl::foldICmpTruncWithTruncOrExt(ICmpInst &Cmp,
1534 const SimplifyQuery &Q) {
1535 Value *X, *Y;
1536 CmpPredicate Pred;
1537 bool YIsSExt = false;
1538 // Try to match icmp (trunc X), (trunc Y)
1539 if (match(&Cmp, m_ICmp(Pred, m_Trunc(m_Value(X)), m_Trunc(m_Value(Y))))) {
1540 unsigned NoWrapFlags = cast<TruncInst>(Cmp.getOperand(0))->getNoWrapKind() &
1541 cast<TruncInst>(Cmp.getOperand(1))->getNoWrapKind();
1542 if (Cmp.isSigned()) {
1543 // For signed comparisons, both truncs must be nsw.
1544 if (!(NoWrapFlags & TruncInst::NoSignedWrap))
1545 return nullptr;
1546 } else {
1547 // For unsigned and equality comparisons, either both must be nuw or
1548 // both must be nsw, we don't care which.
1549 if (!NoWrapFlags)
1550 return nullptr;
1551 }
1552
1553 if (X->getType() != Y->getType() &&
1554 (!Cmp.getOperand(0)->hasOneUse() || !Cmp.getOperand(1)->hasOneUse()))
1555 return nullptr;
1556 if (!isDesirableIntType(X->getType()->getScalarSizeInBits()) &&
1557 isDesirableIntType(Y->getType()->getScalarSizeInBits())) {
1558 std::swap(X, Y);
1559 Pred = Cmp.getSwappedPredicate(Pred);
1560 }
1561 YIsSExt = !(NoWrapFlags & TruncInst::NoUnsignedWrap);
1562 }
1563 // Try to match icmp (trunc nuw X), (zext Y)
1564 else if (!Cmp.isSigned() &&
1565 match(&Cmp, m_c_ICmp(Pred, m_NUWTrunc(m_Value(X)),
1566 m_OneUse(m_ZExt(m_Value(Y)))))) {
1567 // Can fold trunc nuw + zext for unsigned and equality predicates.
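// e.g. icmp ult (trunc nuw i64 %x to i32), (zext i16 %y to i32)
//      --> icmp ult i64 %x, (zext i16 %y to i64).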
1568 }
1569 // Try to match icmp (trunc nsw X), (sext Y)
1570 else if (match(&Cmp, m_c_ICmp(Pred, m_NSWTrunc(m_Value(X)),
1571 m_OneUse(m_ZExtOrSExt(m_Value(Y)))))) {
1572 // Can fold trunc nsw + zext/sext for all predicates.
1573 YIsSExt =
1574 isa<SExtInst>(Cmp.getOperand(0)) || isa<SExtInst>(Cmp.getOperand(1));
1575 } else
1576 return nullptr;
1577
1578 Type *TruncTy = Cmp.getOperand(0)->getType();
1579 unsigned TruncBits = TruncTy->getScalarSizeInBits();
1580
1581 // If this transform will end up changing from desirable types -> undesirable
1582 // types skip it.
1583 if (isDesirableIntType(TruncBits) &&
1584 !isDesirableIntType(X->getType()->getScalarSizeInBits()))
1585 return nullptr;
1586
1587 Value *NewY = Builder.CreateIntCast(Y, X->getType(), YIsSExt);
1588 return new ICmpInst(Pred, X, NewY);
1589}
1590
1591/// Fold icmp (xor X, Y), C.
1592 Instruction *InstCombinerImpl::foldICmpXorConstant(ICmpInst &Cmp,
1593 BinaryOperator *Xor,
1594 const APInt &C) {
1595 if (Instruction *I = foldICmpXorShiftConst(Cmp, Xor, C))
1596 return I;
1597
1598 Value *X = Xor->getOperand(0);
1599 Value *Y = Xor->getOperand(1);
1600 const APInt *XorC;
1601 if (!match(Y, m_APInt(XorC)))
1602 return nullptr;
1603
1604 // If this is a comparison that tests the sign bit (X < 0) or (X > -1),
1605 // fold the xor.
1606 ICmpInst::Predicate Pred = Cmp.getPredicate();
1607 bool TrueIfSigned = false;
1608 if (isSignBitCheck(Cmp.getPredicate(), C, TrueIfSigned)) {
1609
1610 // If the sign bit of the XorCst is not set, there is no change to
1611 // the operation, just stop using the Xor.
1612 if (!XorC->isNegative())
1613 return replaceOperand(Cmp, 0, X);
1614
1615 // Emit the opposite comparison.
1616 if (TrueIfSigned)
1617 return new ICmpInst(ICmpInst::ICMP_SGT, X,
1618 ConstantInt::getAllOnesValue(X->getType()));
1619 else
1620 return new ICmpInst(ICmpInst::ICMP_SLT, X,
1621 ConstantInt::getNullValue(X->getType()));
1622 }
1623
1624 if (Xor->hasOneUse()) {
1625 // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
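// e.g. for i8: (xor X, 0x80) u< 0x90 --> X s< 0x10; XOR with the sign mask
// maps unsigned order onto signed order (and 0x90 ^ 0x80 == 0x10).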
1626 if (!Cmp.isEquality() && XorC->isSignMask()) {
1627 Pred = Cmp.getFlippedSignednessPredicate();
1628 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1629 }
1630
1631 // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask))
1632 if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
1633 Pred = Cmp.getFlippedSignednessPredicate();
1634 Pred = Cmp.getSwappedPredicate(Pred);
1635 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1636 }
1637 }
1638
1639 // Mask constant magic can eliminate an 'xor' with unsigned compares.
1640 if (Pred == ICmpInst::ICMP_UGT) {
1641 // (xor X, ~C) >u C --> X <u ~C (when C+1 is a power of 2)
1642 if (*XorC == ~C && (C + 1).isPowerOf2())
1643 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
1644 // (xor X, C) >u C --> X >u C (when C+1 is a power of 2)
1645 if (*XorC == C && (C + 1).isPowerOf2())
1646 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
1647 }
1648 if (Pred == ICmpInst::ICMP_ULT) {
1649 // (xor X, -C) <u C --> X >u ~C (when C is a power of 2)
1650 if (*XorC == -C && C.isPowerOf2())
1651 return new ICmpInst(ICmpInst::ICMP_UGT, X,
1652 ConstantInt::get(X->getType(), ~C));
1653 // (xor X, C) <u C --> X >u ~C (when -C is a power of 2)
1654 if (*XorC == C && (-C).isPowerOf2())
1655 return new ICmpInst(ICmpInst::ICMP_UGT, X,
1656 ConstantInt::get(X->getType(), ~C));
1657 }
1658 return nullptr;
1659}
1660
1661/// For power-of-2 C:
1662/// ((X s>> ShiftC) ^ X) u< C --> (X + C) u< (C << 1)
1663/// ((X s>> ShiftC) ^ X) u> (C - 1) --> (X + C) u> ((C << 1) - 1)
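/// e.g. for i8 with ShiftC == 7 and C == 4: ((X s>> 7) ^ X) u< 4 -->
/// (X + 4) u< 8, turning the one's-complement "abs(X) s< 4" idiom into a
/// single range check.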
1664 Instruction *InstCombinerImpl::foldICmpXorShiftConst(ICmpInst &Cmp,
1665 BinaryOperator *Xor,
1666 const APInt &C) {
1667 CmpInst::Predicate Pred = Cmp.getPredicate();
1668 APInt PowerOf2;
1669 if (Pred == ICmpInst::ICMP_ULT)
1670 PowerOf2 = C;
1671 else if (Pred == ICmpInst::ICMP_UGT && !C.isMaxValue())
1672 PowerOf2 = C + 1;
1673 else
1674 return nullptr;
1675 if (!PowerOf2.isPowerOf2())
1676 return nullptr;
1677 Value *X;
1678 const APInt *ShiftC;
1679 if (!match(Xor, m_OneUse(m_c_Xor(m_Value(X),
1680 m_AShr(m_Deferred(X), m_APInt(ShiftC))))))
1681 return nullptr;
1682 uint64_t Shift = ShiftC->getLimitedValue();
1683 Type *XType = X->getType();
1684 if (Shift == 0 || PowerOf2.isMinSignedValue())
1685 return nullptr;
1686 Value *Add = Builder.CreateAdd(X, ConstantInt::get(XType, PowerOf2));
1687 APInt Bound =
1688 Pred == ICmpInst::ICMP_ULT ? PowerOf2 << 1 : ((PowerOf2 << 1) - 1);
1689 return new ICmpInst(Pred, Add, ConstantInt::get(XType, Bound));
1690}
1691
1692/// Fold icmp (and (sh X, Y), C2), C1.
1693 Instruction *InstCombinerImpl::foldICmpAndShift(ICmpInst &Cmp,
1694 BinaryOperator *And,
1695 const APInt &C1,
1696 const APInt &C2) {
1697 BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
1698 if (!Shift || !Shift->isShift())
1699 return nullptr;
1700
1701 // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
1702 // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
1703 // code produced by the clang front-end, for bitfield access.
1704 // This seemingly simple opportunity to fold away a shift turns out to be
1705 // rather complicated. See PR17827 for details.
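// e.g. ((X >> 3) & 7) == 5 --> (X & 56) == 40.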
1706 unsigned ShiftOpcode = Shift->getOpcode();
1707 bool IsShl = ShiftOpcode == Instruction::Shl;
1708 const APInt *C3;
1709 if (match(Shift->getOperand(1), m_APInt(C3))) {
1710 APInt NewAndCst, NewCmpCst;
1711 bool AnyCmpCstBitsShiftedOut;
1712 if (ShiftOpcode == Instruction::Shl) {
1713 // For a left shift, we can fold if the comparison is not signed. We can
1714 // also fold a signed comparison if the mask value and comparison value
1715 // are not negative. These constraints may not be obvious, but we can
1716 // prove that they are correct using an SMT solver.
1717 if (Cmp.isSigned() && (C2.isNegative() || C1.isNegative()))
1718 return nullptr;
1719
1720 NewCmpCst = C1.lshr(*C3);
1721 NewAndCst = C2.lshr(*C3);
1722 AnyCmpCstBitsShiftedOut = NewCmpCst.shl(*C3) != C1;
1723 } else if (ShiftOpcode == Instruction::LShr) {
1724 // For a logical right shift, we can fold if the comparison is not signed.
1725 // We can also fold a signed comparison if the shifted mask value and the
1726 // shifted comparison value are not negative. These constraints may not be
1727 // obvious, but we can prove that they are correct using an SMT solver.
1728 NewCmpCst = C1.shl(*C3);
1729 NewAndCst = C2.shl(*C3);
1730 AnyCmpCstBitsShiftedOut = NewCmpCst.lshr(*C3) != C1;
1731 if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative()))
1732 return nullptr;
1733 } else {
1734 // For an arithmetic shift, check that both constants don't use (in a
1735 // signed sense) the top bits being shifted out.
1736 assert(ShiftOpcode == Instruction::AShr && "Unknown shift opcode");
1737 NewCmpCst = C1.shl(*C3);
1738 NewAndCst = C2.shl(*C3);
1739 AnyCmpCstBitsShiftedOut = NewCmpCst.ashr(*C3) != C1;
1740 if (NewAndCst.ashr(*C3) != C2)
1741 return nullptr;
1742 }
1743
1744 if (AnyCmpCstBitsShiftedOut) {
1745 // If we shifted bits out, the fold is not going to work out. As a
1746 // special case, check to see if this means that the result is always
1747 // true or false now.
1748 if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
1749 return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType()));
1750 if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
1751 return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
1752 } else {
1753 Value *NewAnd = Builder.CreateAnd(
1754 Shift->getOperand(0), ConstantInt::get(And->getType(), NewAndCst));
1755 return new ICmpInst(Cmp.getPredicate(), NewAnd,
1756 ConstantInt::get(And->getType(), NewCmpCst));
1757 }
1758 }
1759
1760 // Turn ((X >> Y) & C2) == 0 into (X & (C2 << Y)) == 0. The latter is
1761 // preferable because it allows the C2 << Y expression to be hoisted out of a
1762 // loop if Y is invariant and X is not.
1763 if (Shift->hasOneUse() && C1.isZero() && Cmp.isEquality() &&
1764 !Shift->isArithmeticShift() &&
1765 ((!IsShl && C2.isOne()) || !isa<Constant>(Shift->getOperand(0)))) {
1766 // Compute C2 << Y.
1767 Value *NewShift =
1768 IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1))
1769 : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1));
1770
1771 // Compute X & (C2 << Y).
1772 Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
1773 return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1774 }
1775
1776 return nullptr;
1777}
1778
1779/// Fold icmp (and X, C2), C1.
1780 Instruction *InstCombinerImpl::foldICmpAndConstConst(ICmpInst &Cmp,
1781 BinaryOperator *And,
1782 const APInt &C1) {
1783 bool isICMP_NE = Cmp.getPredicate() == ICmpInst::ICMP_NE;
1784
1785 // For vectors: icmp ne (and X, 1), 0 --> trunc X to N x i1
1786 // TODO: We canonicalize to the longer form for scalars because we have
1787 // better analysis/folds for icmp, and codegen may be better with icmp.
1788 if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.isZero() &&
1789 match(And->getOperand(1), m_One()))
1790 return new TruncInst(And->getOperand(0), Cmp.getType());
1791
1792 const APInt *C2;
1793 Value *X;
1794 if (!match(And, m_And(m_Value(X), m_APInt(C2))))
1795 return nullptr;
1796
1797 // (and X, highmask) s> [0, ~highmask] --> X s> ~highmask
1798 if (Cmp.getPredicate() == ICmpInst::ICMP_SGT && C1.ule(~*C2) &&
1799 C2->isNegatedPowerOf2())
1800 return new ICmpInst(ICmpInst::ICMP_SGT, X,
1801 ConstantInt::get(X->getType(), ~*C2));
1802 // (and X, highmask) s< [1, -highmask] --> X s< -highmask
1803 if (Cmp.getPredicate() == ICmpInst::ICMP_SLT && !C1.isSignMask() &&
1804 (C1 - 1).ule(~*C2) && C2->isNegatedPowerOf2() && !C2->isSignMask())
1805 return new ICmpInst(ICmpInst::ICMP_SLT, X,
1806 ConstantInt::get(X->getType(), -*C2));
1807
1808 // Don't perform the following transforms if the AND has multiple uses
1809 if (!And->hasOneUse())
1810 return nullptr;
1811
1812 if (Cmp.isEquality() && C1.isZero()) {
1813 // Restrict this fold to single-use 'and' (PR10267).
1814 // Replace (and X, (1 << size(X)-1) != 0) with X s< 0
1815 if (C2->isSignMask()) {
1816 Constant *Zero = Constant::getNullValue(X->getType());
1817 auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
1818 return new ICmpInst(NewPred, X, Zero);
1819 }
1820
1821 APInt NewC2 = *C2;
1822 KnownBits Know = computeKnownBits(And->getOperand(0), And);
1823 // Set high zeros of C2 to allow matching negated power-of-2.
1824 NewC2 = *C2 | APInt::getHighBitsSet(C2->getBitWidth(),
1825 Know.countMinLeadingZeros());
1826
1827 // Restrict this fold only for single-use 'and' (PR10267).
1828 // ((%x & C) == 0) --> %x u< (-C) iff (-C) is power of two.
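// e.g. (%x & -8) == 0 --> %x u< 8.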
1829 if (NewC2.isNegatedPowerOf2()) {
1830 Constant *NegBOC = ConstantInt::get(And->getType(), -NewC2);
1831 auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
1832 return new ICmpInst(NewPred, X, NegBOC);
1833 }
1834 }
1835
1836 // If the LHS is an 'and' of a truncate and we can widen the and/compare to
1837 // the input width without changing the value produced, eliminate the cast:
1838 //
1839 // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
1840 //
1841 // We can do this transformation if the constants do not have their sign bits
1842 // set or if it is an equality comparison. Extending a relational comparison
1843 // when we're checking the sign bit would not work.
1844 Value *W;
1845 if (match(And->getOperand(0), m_OneUse(m_Trunc(m_Value(W)))) &&
1846 (Cmp.isEquality() || (!C1.isNegative() && !C2->isNegative()))) {
1847 // TODO: Is this a good transform for vectors? Wider types may reduce
1848 // throughput. Should this transform be limited (even for scalars) by using
1849 // shouldChangeType()?
1850 if (!Cmp.getType()->isVectorTy()) {
1851 Type *WideType = W->getType();
1852 unsigned WideScalarBits = WideType->getScalarSizeInBits();
1853 Constant *ZextC1 = ConstantInt::get(WideType, C1.zext(WideScalarBits));
1854 Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1855 Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
1856 return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1857 }
1858 }
1859
1860 if (Instruction *I = foldICmpAndShift(Cmp, And, C1, *C2))
1861 return I;
1862
1863 // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
1864 // (icmp pred (and A, (or (shl 1, B), 1), 0))
1865 //
1866 // iff pred isn't signed
1867 if (!Cmp.isSigned() && C1.isZero() && And->getOperand(0)->hasOneUse() &&
1868 match(And->getOperand(1), m_One())) {
1869 Constant *One = cast<Constant>(And->getOperand(1));
1870 Value *Or = And->getOperand(0);
1871 Value *A, *B, *LShr;
1872 if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1873 match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
1874 unsigned UsesRemoved = 0;
1875 if (And->hasOneUse())
1876 ++UsesRemoved;
1877 if (Or->hasOneUse())
1878 ++UsesRemoved;
1879 if (LShr->hasOneUse())
1880 ++UsesRemoved;
1881
1882 // Compute A & ((1 << B) | 1)
1883 unsigned RequireUsesRemoved = match(B, m_ImmConstant()) ? 1 : 3;
1884 if (UsesRemoved >= RequireUsesRemoved) {
1885 Value *NewOr =
1886 Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
1887 /*HasNUW=*/true),
1888 One, Or->getName());
1889 Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
1890 return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1891 }
1892 }
1893 }
1894
1895 // (icmp eq (and (bitcast X to int), ExponentMask), ExponentMask) -->
1896 // llvm.is.fpclass(X, fcInf|fcNan)
1897 // (icmp ne (and (bitcast X to int), ExponentMask), ExponentMask) -->
1898 // llvm.is.fpclass(X, ~(fcInf|fcNan))
1899 // (icmp eq (and (bitcast X to int), ExponentMask), 0) -->
1900 // llvm.is.fpclass(X, fcSubnormal|fcZero)
1901 // (icmp ne (and (bitcast X to int), ExponentMask), 0) -->
1902 // llvm.is.fpclass(X, ~(fcSubnormal|fcZero))
1903 Value *V;
1904 if (!Cmp.getParent()->getParent()->hasFnAttribute(
1905 Attribute::NoImplicitFloat) &&
1906 Cmp.isEquality() &&
1907 match(X, m_OneUse(m_ElementWiseBitCast(m_Value(V))))) {
1908 Type *FPType = V->getType()->getScalarType();
1909 if (FPType->isIEEELikeFPTy() && (C1.isZero() || C1 == *C2)) {
1910 APInt ExponentMask =
1911 APFloat::getInf(FPType->getFltSemantics()).bitcastToAPInt();
1912 if (*C2 == ExponentMask) {
1913 unsigned Mask = C1.isZero()
1914 ? fcSubnormal | fcZero
1915 : fcInf | fcNan;
1916 if (isICMP_NE)
1917 Mask = ~Mask & fcAllFlags;
1918 return replaceInstUsesWith(Cmp, Builder.createIsFPClass(V, Mask));
1919 }
1920 }
1921 }
1922
1923 return nullptr;
1924}
1925
1926/// Fold icmp (and X, Y), C.
1927 Instruction *InstCombinerImpl::foldICmpAndConstant(ICmpInst &Cmp,
1928 BinaryOperator *And,
1929 const APInt &C) {
1930 if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
1931 return I;
1932
1933 const ICmpInst::Predicate Pred = Cmp.getPredicate();
1934 bool TrueIfNeg;
1935 if (isSignBitCheck(Pred, C, TrueIfNeg)) {
1936 // ((X - 1) & ~X) < 0 --> X == 0
1937 // ((X - 1) & ~X) >= 0 --> X != 0
1938 Value *X;
1939 if (match(And->getOperand(0), m_Add(m_Value(X), m_AllOnes())) &&
1940 match(And->getOperand(1), m_Not(m_Specific(X)))) {
1941 auto NewPred = TrueIfNeg ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
1942 return new ICmpInst(NewPred, X, ConstantInt::getNullValue(X->getType()));
1943 }
1944 // (X & -X) < 0 --> X == MinSignedC
1945 // (X & -X) > -1 --> X != MinSignedC
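// X & -X isolates the lowest set bit of X, so it is negative only when that
// bit is the sign bit, i.e. when X == MinSignedC.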
1946 if (match(And, m_c_And(m_Neg(m_Value(X)), m_Deferred(X)))) {
1947 Constant *MinSignedC = ConstantInt::get(
1948 X->getType(),
1949 APInt::getSignedMinValue(X->getType()->getScalarSizeInBits()));
1950 auto NewPred = TrueIfNeg ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
1951 return new ICmpInst(NewPred, X, MinSignedC);
1952 }
1953 }
1954
1955 // TODO: These all require that Y is constant too, so refactor with the above.
1956
1957 // Try to optimize things like "A[i] & 42 == 0" to index computations.
1958 Value *X = And->getOperand(0);
1959 Value *Y = And->getOperand(1);
1960 if (auto *C2 = dyn_cast<ConstantInt>(Y))
1961 if (auto *LI = dyn_cast<LoadInst>(X))
1962 if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1963 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(LI, GEP, Cmp, C2))
1964 return Res;
1965
1966 if (!Cmp.isEquality())
1967 return nullptr;
1968
1969 // X & -C == -C -> X u> ~C
1970 // X & -C != -C -> X u<= ~C
1971 // iff C is a power of 2
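// e.g. for i8 with RHS == -4: (X & -4) == -4 --> X u> -5, i.e. X u>= 0xFC.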
1972 if (Cmp.getOperand(1) == Y && C.isNegatedPowerOf2()) {
1973 auto NewPred =
1974 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT : CmpInst::ICMP_ULE;
1975 return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
1976 }
1977
1978 // ((zext i1 X) & Y) == 0 --> !((trunc Y) & X)
1979 // ((zext i1 X) & Y) != 0 --> ((trunc Y) & X)
1980 // ((zext i1 X) & Y) == 1 --> ((trunc Y) & X)
1981 // ((zext i1 X) & Y) != 1 --> !((trunc Y) & X)
1982 if (match(And, m_OneUse(m_c_And(m_OneUse(m_ZExt(m_Value(X))), m_Value(Y)))) &&
1983 X->getType()->isIntOrIntVectorTy(1) && (C.isZero() || C.isOne())) {
1984 Value *TruncY = Builder.CreateTrunc(Y, X->getType());
1985 if (C.isZero() ^ (Pred == CmpInst::ICMP_NE)) {
1986 Value *And = Builder.CreateAnd(TruncY, X);
1987 return BinaryOperator::CreateNot(And);
1988 }
1989 return BinaryOperator::CreateAnd(TruncY, X);
1990 }
1991
1992 // (icmp eq/ne (and (shl -1, X), Y), 0)
1993 // -> (icmp eq/ne (lshr Y, X), 0)
1994 // We could technically handle any C == 0 or (C < 0 && isOdd(C)) but it seems
1995 // highly unlikely the non-zero case will ever show up in code.
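// e.g. ((-1 << X) & Y) == 0 --> (Y >> X) == 0; both sides test whether Y has
// any bit set at position X or above.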
1996 if (C.isZero() &&
1997 match(And, m_OneUse(m_c_And(m_OneUse(m_Shl(m_AllOnes(), m_Value(X))),
1998 m_Value(Y))))) {
1999 Value *LShr = Builder.CreateLShr(Y, X);
2000 return new ICmpInst(Pred, LShr, Constant::getNullValue(LShr->getType()));
2001 }
2002
2003 // (icmp eq/ne (and (add A, Addend), Msk), C)
2004 // -> (icmp eq/ne (and A, Msk), (and (sub C, Addend), Msk))
2005 {
2006 Value *A;
2007 const APInt *Addend, *Msk;
2008 if (match(And, m_And(m_OneUse(m_Add(m_Value(A), m_APInt(Addend))),
2009 m_LowBitMask(Msk))) &&
2010 C.ule(*Msk)) {
2011 APInt NewComperand = (C - *Addend) & *Msk;
2012 Value *MaskA = Builder.CreateAnd(A, ConstantInt::get(A->getType(), *Msk));
2013 return new ICmpInst(Pred, MaskA,
2014 ConstantInt::get(MaskA->getType(), NewComperand));
2015 }
2016 }
2017
2018 return nullptr;
2019}
2020
2021/// Fold icmp eq/ne (or (xor/sub (X1, X2), xor/sub (X3, X4))), 0.
2022 static Value *foldICmpOrXorSubChain(ICmpInst &Cmp, BinaryOperator *Or,
2023 InstCombiner::BuilderTy &Builder) {
2024 // Are we using xors or subs to bitwise check for a pair or pairs of
2025 // (in)equalities? Convert to a shorter form that has more potential to be
2026 // folded even further.
2027 // ((X1 ^/- X2) || (X3 ^/- X4)) == 0 --> (X1 == X2) && (X3 == X4)
2028 // ((X1 ^/- X2) || (X3 ^/- X4)) != 0 --> (X1 != X2) || (X3 != X4)
2029 // ((X1 ^/- X2) || (X3 ^/- X4) || (X5 ^/- X6)) == 0 -->
2030 // (X1 == X2) && (X3 == X4) && (X5 == X6)
2031 // ((X1 ^/- X2) || (X3 ^/- X4) || (X5 ^/- X6)) != 0 -->
2032 // (X1 != X2) || (X3 != X4) || (X5 != X6)
2033 SmallVector<std::pair<Value *, Value *>, 16> CmpValues;
2034 SmallVector<Value *, 16> WorkList(1, Or);
2035
2036 while (!WorkList.empty()) {
2037 auto MatchOrOperatorArgument = [&](Value *OrOperatorArgument) {
2038 Value *Lhs, *Rhs;
2039
2040 if (match(OrOperatorArgument,
2041 m_OneUse(m_Xor(m_Value(Lhs), m_Value(Rhs))))) {
2042 CmpValues.emplace_back(Lhs, Rhs);
2043 return;
2044 }
2045
2046 if (match(OrOperatorArgument,
2047 m_OneUse(m_Sub(m_Value(Lhs), m_Value(Rhs))))) {
2048 CmpValues.emplace_back(Lhs, Rhs);
2049 return;
2050 }
2051
2052 WorkList.push_back(OrOperatorArgument);
2053 };
2054
2055 Value *CurrentValue = WorkList.pop_back_val();
2056 Value *OrOperatorLhs, *OrOperatorRhs;
2057
2058 if (!match(CurrentValue,
2059 m_Or(m_Value(OrOperatorLhs), m_Value(OrOperatorRhs)))) {
2060 return nullptr;
2061 }
2062
2063 MatchOrOperatorArgument(OrOperatorRhs);
2064 MatchOrOperatorArgument(OrOperatorLhs);
2065 }
2066
2067 ICmpInst::Predicate Pred = Cmp.getPredicate();
2068 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
2069 Value *LhsCmp = Builder.CreateICmp(Pred, CmpValues.rbegin()->first,
2070 CmpValues.rbegin()->second);
2071
2072 for (auto It = CmpValues.rbegin() + 1; It != CmpValues.rend(); ++It) {
2073 Value *RhsCmp = Builder.CreateICmp(Pred, It->first, It->second);
2074 LhsCmp = Builder.CreateBinOp(BOpc, LhsCmp, RhsCmp);
2075 }
2076
2077 return LhsCmp;
2078}
2079
2080/// Fold icmp (or X, Y), C.
2081 Instruction *InstCombinerImpl::foldICmpOrConstant(ICmpInst &Cmp,
2082 BinaryOperator *Or,
2083 const APInt &C) {
2084 ICmpInst::Predicate Pred = Cmp.getPredicate();
2085 if (C.isOne()) {
2086 // icmp slt signum(V) 1 --> icmp slt V, 1
2087 Value *V = nullptr;
2088 if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
2089 return new ICmpInst(ICmpInst::ICMP_SLT, V,
2090 ConstantInt::get(V->getType(), 1));
2091 }
2092
2093 Value *OrOp0 = Or->getOperand(0), *OrOp1 = Or->getOperand(1);
2094
2095 // (icmp eq/ne (or disjoint x, C0), C1)
2096 // -> (icmp eq/ne x, C0^C1)
2097 if (Cmp.isEquality() && match(OrOp1, m_ImmConstant()) &&
2098 cast<PossiblyDisjointInst>(Or)->isDisjoint()) {
2099 Value *NewC =
2100 Builder.CreateXor(OrOp1, ConstantInt::get(OrOp1->getType(), C));
2101 return new ICmpInst(Pred, OrOp0, NewC);
2102 }
2103
2104 const APInt *MaskC;
2105 if (match(OrOp1, m_APInt(MaskC)) && Cmp.isEquality()) {
2106 if (*MaskC == C && (C + 1).isPowerOf2()) {
2107 // X | C == C --> X <=u C
2108 // X | C != C --> X >u C
2109 // iff C+1 is a power of 2 (C is a bitmask of the low bits)
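// e.g. (X | 7) == 7 --> X u<= 7.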
2110 Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
2111 return new ICmpInst(Pred, OrOp0, OrOp1);
2112 }
2113
2114 // More general: canonicalize 'equality with set bits mask' to
2115 // 'equality with clear bits mask'.
2116 // (X | MaskC) == C --> (X & ~MaskC) == C ^ MaskC
2117 // (X | MaskC) != C --> (X & ~MaskC) != C ^ MaskC
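// e.g. (X | 0x0F) == 0x3F --> (X & 0xF0) == 0x30.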
2118 if (Or->hasOneUse()) {
2119 Value *And = Builder.CreateAnd(OrOp0, ~(*MaskC));
2120 Constant *NewC = ConstantInt::get(Or->getType(), C ^ (*MaskC));
2121 return new ICmpInst(Pred, And, NewC);
2122 }
2123 }
2124
2125 // (X | (X-1)) s< 0 --> X s< 1
2126 // (X | (X-1)) s> -1 --> X s> 0
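// X | (X-1) fills in all bits below the lowest set bit of X, so its sign bit
// ends up set exactly when X is negative or zero.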
2127 Value *X;
2128 bool TrueIfSigned;
2129 if (isSignBitCheck(Pred, C, TrueIfSigned) &&
2130 match(Or, m_c_Or(m_Add(m_Value(X), m_AllOnes()), m_Deferred(X)))) {
2131 auto NewPred = TrueIfSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGT;
2132 Constant *NewC = ConstantInt::get(X->getType(), TrueIfSigned ? 1 : 0);
2133 return new ICmpInst(NewPred, X, NewC);
2134 }
2135
2136 const APInt *OrC;
2137 // icmp(X | OrC, C) --> icmp(X, 0)
2138 if (C.isNonNegative() && match(Or, m_Or(m_Value(X), m_APInt(OrC)))) {
2139 switch (Pred) {
2140 // X | OrC s< C --> X s< 0 iff OrC s>= C s>= 0
2141 case ICmpInst::ICMP_SLT:
2142 // X | OrC s>= C --> X s>= 0 iff OrC s>= C s>= 0
2143 case ICmpInst::ICMP_SGE:
2144 if (OrC->sge(C))
2145 return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2146 break;
2147 // X | OrC s<= C --> X s< 0 iff OrC s> C s>= 0
2148 case ICmpInst::ICMP_SLE:
2149 // X | OrC s> C --> X s>= 0 iff OrC s> C s>= 0
2150 case ICmpInst::ICMP_SGT:
2151 if (OrC->sgt(C))
2152 return new ICmpInst(ICmpInst::getFlippedStrictnessPredicate(Pred), X,
2153 ConstantInt::getNullValue(X->getType()));
2154 break;
2155 default:
2156 break;
2157 }
2158 }
2159
2160 if (!Cmp.isEquality() || !C.isZero() || !Or->hasOneUse())
2161 return nullptr;
2162
2163 Value *P, *Q;
2164 if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
2165 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
2166 // -> and (icmp eq P, null), (icmp eq Q, null).
2167 Value *CmpP =
2168 Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
2169 Value *CmpQ =
2170 Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
2171 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
2172 return BinaryOperator::Create(BOpc, CmpP, CmpQ);
2173 }
2174
2175 if (Value *V = foldICmpOrXorSubChain(Cmp, Or, Builder))
2176 return replaceInstUsesWith(Cmp, V);
2177
2178 return nullptr;
2179}
2180
2181/// Fold icmp (mul X, Y), C.
2182 Instruction *InstCombinerImpl::foldICmpMulConstant(ICmpInst &Cmp,
2183 BinaryOperator *Mul,
2184 const APInt &C) {
2185 ICmpInst::Predicate Pred = Cmp.getPredicate();
2186 Type *MulTy = Mul->getType();
2187 Value *X = Mul->getOperand(0);
2188
2189 // If there's no overflow:
2190 // X * X == 0 --> X == 0
2191 // X * X != 0 --> X != 0
2192 if (Cmp.isEquality() && C.isZero() && X == Mul->getOperand(1) &&
2193 (Mul->hasNoUnsignedWrap() || Mul->hasNoSignedWrap()))
2194 return new ICmpInst(Pred, X, ConstantInt::getNullValue(MulTy));
2195
2196 const APInt *MulC;
2197 if (!match(Mul->getOperand(1), m_APInt(MulC)))
2198 return nullptr;
2199
2200 // If this is a test of the sign bit and the multiply is sign-preserving with
2201 // a constant operand, use the multiply LHS operand instead:
2202 // (X * +MulC) < 0 --> X < 0
2203 // (X * -MulC) < 0 --> X > 0
2204 if (isSignTest(Pred, C) && Mul->hasNoSignedWrap()) {
2205 if (MulC->isNegative())
2206 Pred = ICmpInst::getSwappedPredicate(Pred);
2207 return new ICmpInst(Pred, X, ConstantInt::getNullValue(MulTy));
2208 }
2209
2210 if (MulC->isZero())
2211 return nullptr;
2212
2213 // If the multiply does not wrap or the constant is odd, try to divide the
2214 // compare constant by the multiplication factor.
2215 if (Cmp.isEquality()) {
2216 // (mul nsw X, MulC) eq/ne C --> X eq/ne C /s MulC
2217 if (Mul->hasNoSignedWrap() && C.srem(*MulC).isZero()) {
2218 Constant *NewC = ConstantInt::get(MulTy, C.sdiv(*MulC));
2219 return new ICmpInst(Pred, X, NewC);
2220 }
2221
2222 // C % MulC == 0 is weaker than we could use if MulC is odd, because it
2223 // is correct to transform if MulC * N == C including overflow. I.e. with i8
2224 // (icmp eq (mul X, 5), 101) -> (icmp eq X, 225) but since 101 % 5 != 0, we
2225 // miss that case.
2226 if (C.urem(*MulC).isZero()) {
2227 // (mul nuw X, MulC) eq/ne C --> X eq/ne C /u MulC
2228 // (mul X, OddC) eq/ne N * C --> X eq/ne N
2229 if ((*MulC & 1).isOne() || Mul->hasNoUnsignedWrap()) {
2230 Constant *NewC = ConstantInt::get(MulTy, C.udiv(*MulC));
2231 return new ICmpInst(Pred, X, NewC);
2232 }
2233 }
2234 }
2235
2236 // With a matching no-overflow guarantee, fold the constants:
2237 // (X * MulC) < C --> X < (C / MulC)
2238 // (X * MulC) > C --> X > (C / MulC)
2239 // TODO: Assert that Pred is not equal to SGE, SLE, UGE, ULE?
2240 Constant *NewC = nullptr;
2241 if (Mul->hasNoSignedWrap() && ICmpInst::isSigned(Pred)) {
2242 // MININT / -1 --> overflow.
2243 if (C.isMinSignedValue() && MulC->isAllOnes())
2244 return nullptr;
2245 if (MulC->isNegative())
2246 Pred = ICmpInst::getSwappedPredicate(Pred);
2247
2248 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
2249 NewC = ConstantInt::get(
2250 MulTy, APIntOps::RoundingSDiv(C, *MulC, APInt::Rounding::UP));
2251 } else {
2252 assert((Pred == ICmpInst::ICMP_SLE || Pred == ICmpInst::ICMP_SGT) &&
2253 "Unexpected predicate");
2254 NewC = ConstantInt::get(
2255 MulTy, APIntOps::RoundingSDiv(C, *MulC, APInt::Rounding::DOWN));
2256 }
2257 } else if (Mul->hasNoUnsignedWrap() && ICmpInst::isUnsigned(Pred)) {
2258 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE) {
2259 NewC = ConstantInt::get(
2260 MulTy, APIntOps::RoundingUDiv(C, *MulC, APInt::Rounding::UP));
2261 } else {
2262 assert((Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
2263 "Unexpected predicate");
2264 NewC = ConstantInt::get(
2265 MulTy, APIntOps::RoundingUDiv(C, *MulC, APInt::Rounding::DOWN));
2266 }
2267 }
2268
2269 return NewC ? new ICmpInst(Pred, X, NewC) : nullptr;
2270}
2271
2272/// Fold icmp (shl nuw C2, Y), C.
2273 Instruction *InstCombinerImpl::foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl,
2274 const APInt &C) {
2275 Value *Y;
2276 const APInt *C2;
2277 if (!match(Shl, m_NUWShl(m_APInt(C2), m_Value(Y))))
2278 return nullptr;
2279
2280 Type *ShiftType = Shl->getType();
2281 unsigned TypeBits = C.getBitWidth();
2282 ICmpInst::Predicate Pred = Cmp.getPredicate();
2283 if (Cmp.isUnsigned()) {
2284 if (C2->isZero() || C2->ugt(C))
2285 return nullptr;
2286 APInt Div, Rem;
2287 APInt::udivrem(C, *C2, Div, Rem);
2288 bool CIsPowerOf2 = Rem.isZero() && Div.isPowerOf2();
2289
2290 // (1 << Y) pred C -> Y pred Log2(C)
2291 if (!CIsPowerOf2) {
2292 // (1 << Y) < 30 -> Y <= 4
2293 // (1 << Y) <= 30 -> Y <= 4
2294 // (1 << Y) >= 30 -> Y > 4
2295 // (1 << Y) > 30 -> Y > 4
2296 if (Pred == ICmpInst::ICMP_ULT)
2297 Pred = ICmpInst::ICMP_ULE;
2298 else if (Pred == ICmpInst::ICMP_UGE)
2299 Pred = ICmpInst::ICMP_UGT;
2300 }
2301
2302 unsigned CLog2 = Div.logBase2();
2303 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
2304 } else if (Cmp.isSigned() && C2->isOne()) {
2305 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
2306 // (1 << Y) > 0 -> Y != 31
2307 // (1 << Y) > C -> Y != 31 if C is negative.
2308 if (Pred == ICmpInst::ICMP_SGT && C.sle(0))
2309 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2310
2311 // (1 << Y) < 0 -> Y == 31
2312 // (1 << Y) < 1 -> Y == 31
2313 // (1 << Y) < C -> Y == 31 if C is negative and not signed min.
2314 // Exclude signed min by subtracting 1 and lower the upper bound to 0.
2315 if (Pred == ICmpInst::ICMP_SLT && (C - 1).sle(0))
2316 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2317 }
2318
2319 return nullptr;
2320}
2321
2322/// Fold icmp (shl X, Y), C.
2323 Instruction *InstCombinerImpl::foldICmpShlConstant(ICmpInst &Cmp,
2324 BinaryOperator *Shl,
2325 const APInt &C) {
2326 const APInt *ShiftVal;
2327 if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
2328 return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal);
2329
2330 ICmpInst::Predicate Pred = Cmp.getPredicate();
2331 // (icmp pred (shl nuw&nsw X, Y), Csle0)
2332 // -> (icmp pred X, Csle0)
2333 //
2334 // The idea is the nuw/nsw essentially freeze the sign bit for the shift op
2335 // so X's must be what is used.
2336 if (C.sle(0) && Shl->hasNoUnsignedWrap() && Shl->hasNoSignedWrap())
2337 return new ICmpInst(Pred, Shl->getOperand(0), Cmp.getOperand(1));
2338
2339 // (icmp eq/ne (shl nuw|nsw X, Y), 0)
2340 // -> (icmp eq/ne X, 0)
2341 if (ICmpInst::isEquality(Pred) && C.isZero() &&
2342 (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap()))
2343 return new ICmpInst(Pred, Shl->getOperand(0), Cmp.getOperand(1));
2344
2345 // (icmp slt (shl nsw X, Y), 0/1)
2346 // -> (icmp slt X, 0/1)
2347 // (icmp sgt (shl nsw X, Y), 0/-1)
2348 // -> (icmp sgt X, 0/-1)
2349 //
2350 // NB: sge/sle with a constant will canonicalize to sgt/slt.
2351 if (Shl->hasNoSignedWrap() &&
2352 (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT))
2353 if (C.isZero() || (Pred == ICmpInst::ICMP_SGT ? C.isAllOnes() : C.isOne()))
2354 return new ICmpInst(Pred, Shl->getOperand(0), Cmp.getOperand(1));
2355
2356 const APInt *ShiftAmt;
2357 if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
2358 return foldICmpShlLHSC(Cmp, Shl, C);
2359
2360 // Check that the shift amount is in range. If not, don't perform undefined
2361 // shifts. When the shift is visited, it will be simplified.
2362 unsigned TypeBits = C.getBitWidth();
2363 if (ShiftAmt->uge(TypeBits))
2364 return nullptr;
2365
2366 Value *X = Shl->getOperand(0);
2367 Type *ShType = Shl->getType();
2368
2369 // NSW guarantees that we are only shifting out sign bits from the high bits,
2370 // so we can ASHR the compare constant without needing a mask and eliminate
2371 // the shift.
2372 if (Shl->hasNoSignedWrap()) {
2373 if (Pred == ICmpInst::ICMP_SGT) {
2374 // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
2375 APInt ShiftedC = C.ashr(*ShiftAmt);
2376 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2377 }
2378 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2379 C.ashr(*ShiftAmt).shl(*ShiftAmt) == C) {
2380 APInt ShiftedC = C.ashr(*ShiftAmt);
2381 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2382 }
2383 if (Pred == ICmpInst::ICMP_SLT) {
2384 // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
2385 // (X << S) <=s C is equiv to X <=s (C >> S) for all C
2386 // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
2387 // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
2388 assert(!C.isMinSignedValue() && "Unexpected icmp slt");
2389 APInt ShiftedC = (C - 1).ashr(*ShiftAmt) + 1;
2390 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2391 }
2392 }
2393
2394 // NUW guarantees that we are only shifting out zero bits from the high bits,
2395 // so we can LSHR the compare constant without needing a mask and eliminate
2396 // the shift.
2397 if (Shl->hasNoUnsignedWrap()) {
2398 if (Pred == ICmpInst::ICMP_UGT) {
2399 // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
2400 APInt ShiftedC = C.lshr(*ShiftAmt);
2401 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2402 }
2403 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2404 C.lshr(*ShiftAmt).shl(*ShiftAmt) == C) {
2405 APInt ShiftedC = C.lshr(*ShiftAmt);
2406 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2407 }
2408 if (Pred == ICmpInst::ICMP_ULT) {
2409 // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
2410 // (X << S) <=u C is equiv to X <=u (C >> S) for all C
2411 // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
2412 // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
2413 assert(C.ugt(0) && "ult 0 should have been eliminated");
2414 APInt ShiftedC = (C - 1).lshr(*ShiftAmt) + 1;
2415 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2416 }
2417 }
2418
2419 if (Cmp.isEquality() && Shl->hasOneUse()) {
2420 // Strength-reduce the shift into an 'and'.
2421 Constant *Mask = ConstantInt::get(
2422 ShType,
2423 APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
2424 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2425 Constant *LShrC = ConstantInt::get(ShType, C.lshr(*ShiftAmt));
2426 return new ICmpInst(Pred, And, LShrC);
2427 }
2428
2429 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
2430 bool TrueIfSigned = false;
2431 if (Shl->hasOneUse() && isSignBitCheck(Pred, C, TrueIfSigned)) {
2432 // (X << 31) <s 0 --> (X & 1) != 0
2433 Constant *Mask = ConstantInt::get(
2434 ShType,
2435 APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
2436 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2437 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
2438 And, Constant::getNullValue(ShType));
2439 }
2440
2441 // Simplify 'shl' inequality test into 'and' equality test.
2442 if (Cmp.isUnsigned() && Shl->hasOneUse()) {
2443 // (X l<< C2) u<=/u> C1 iff C1+1 is power of two -> X & (~C1 l>> C2) ==/!= 0
2444 if ((C + 1).isPowerOf2() &&
2445 (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT)) {
2446 Value *And = Builder.CreateAnd(X, (~C).lshr(ShiftAmt->getZExtValue()));
2447 return new ICmpInst(Pred == ICmpInst::ICMP_ULE ? ICmpInst::ICMP_EQ
2448 : ICmpInst::ICMP_NE,
2449 And, Constant::getNullValue(ShType));
2450 }
2451 // (X l<< C2) u</u>= C1 iff C1 is power of two -> X & (-C1 l>> C2) ==/!= 0
2452 if (C.isPowerOf2() &&
2453 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
2454 Value *And =
2455 Builder.CreateAnd(X, (~(C - 1)).lshr(ShiftAmt->getZExtValue()));
2456 return new ICmpInst(Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_EQ
2457 : ICmpInst::ICMP_NE,
2458 And, Constant::getNullValue(ShType));
2459 }
2460 }
2461
2462 // Transform (icmp pred iM (shl iM %v, N), C)
2463 // -> (icmp pred i(M-N) (trunc %v iM to i(M-N)), (trunc (C>>N))
2464 // Transform the shl to a trunc if (trunc (C>>N)) has no loss and i(M-N) is a desirable type.
2465 // This enables us to get rid of the shift in favor of a trunc that may be
2466 // free on the target. It has the additional benefit of comparing to a
2467 // smaller constant that may be more target-friendly.
2468 unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
2469 if (Shl->hasOneUse() && Amt != 0 &&
2470 shouldChangeType(ShType->getScalarSizeInBits(), TypeBits - Amt)) {
2471 ICmpInst::Predicate CmpPred = Pred;
2472 APInt RHSC = C;
2473
2474 if (RHSC.countr_zero() < Amt && ICmpInst::isStrictPredicate(CmpPred)) {
2475 // Try the flipped strictness predicate.
2476 // e.g.:
2477 // icmp ult i64 (shl X, 32), 8589934593 ->
2478 // icmp ule i64 (shl X, 32), 8589934592 ->
2479 // icmp ule i32 (trunc X, i32), 2 ->
2480 // icmp ult i32 (trunc X, i32), 3
2481 if (auto FlippedStrictness = getFlippedStrictnessPredicateAndConstant(
2482 Pred, ConstantInt::get(ShType->getContext(), C))) {
2483 CmpPred = FlippedStrictness->first;
2484 RHSC = cast<ConstantInt>(FlippedStrictness->second)->getValue();
2485 }
2486 }
2487
2488 if (RHSC.countr_zero() >= Amt) {
2489 Type *TruncTy = ShType->getWithNewBitWidth(TypeBits - Amt);
2490 Constant *NewC =
2491 ConstantInt::get(TruncTy, RHSC.ashr(*ShiftAmt).trunc(TypeBits - Amt));
2492 return new ICmpInst(CmpPred,
2493 Builder.CreateTrunc(X, TruncTy, "", /*IsNUW=*/false,
2494 Shl->hasNoSignedWrap()),
2495 NewC);
2496 }
2497 }
2498
2499 return nullptr;
2500}
2501
2502/// Fold icmp ({al}shr X, Y), C.
2503 Instruction *InstCombinerImpl::foldICmpShrConstant(ICmpInst &Cmp,
2504 BinaryOperator *Shr,
2505 const APInt &C) {
2506 // An exact shr only shifts out zero bits, so:
2507 // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
2508 Value *X = Shr->getOperand(0);
2509 CmpInst::Predicate Pred = Cmp.getPredicate();
2510 if (Cmp.isEquality() && Shr->isExact() && C.isZero())
2511 return new ICmpInst(Pred, X, Cmp.getOperand(1));
2512
2513 bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2514 const APInt *ShiftValC;
2515 if (match(X, m_APInt(ShiftValC))) {
2516 if (Cmp.isEquality())
2517 return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftValC);
2518
2519 // (ShiftValC >> Y) >s -1 --> Y != 0 with ShiftValC < 0
2520 // (ShiftValC >> Y) <s 0 --> Y == 0 with ShiftValC < 0
2521 bool TrueIfSigned;
2522 if (!IsAShr && ShiftValC->isNegative() &&
2523 isSignBitCheck(Pred, C, TrueIfSigned))
2524 return new ICmpInst(TrueIfSigned ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE,
2525 Shr->getOperand(1),
2526 ConstantInt::getNullValue(X->getType()));
2527
2528 // If the shifted constant is a power-of-2, test the shift amount directly:
2529 // (ShiftValC >> Y) >u C --> X <u (LZ(C) - LZ(ShiftValC))
2530 // (ShiftValC >> Y) <u C --> X >=u (LZ(C-1) - LZ(ShiftValC))
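// e.g. for i8: (0x80 >> Y) u< 0x10 --> Y u>= 4, since LZ(0x0F) - LZ(0x80) == 4 - 0.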
2531 if (!IsAShr && ShiftValC->isPowerOf2() &&
2532 (Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_ULT)) {
2533 bool IsUGT = Pred == CmpInst::ICMP_UGT;
2534 assert(ShiftValC->uge(C) && "Expected simplify of compare");
2535 assert((IsUGT || !C.isZero()) && "Expected X u< 0 to simplify");
2536
2537 unsigned CmpLZ = IsUGT ? C.countl_zero() : (C - 1).countl_zero();
2538 unsigned ShiftLZ = ShiftValC->countl_zero();
2539 Constant *NewC = ConstantInt::get(Shr->getType(), CmpLZ - ShiftLZ);
2540 auto NewPred = IsUGT ? CmpInst::ICMP_ULT : CmpInst::ICMP_UGE;
2541 return new ICmpInst(NewPred, Shr->getOperand(1), NewC);
2542 }
2543 }
2544
2545 const APInt *ShiftAmtC;
2546 if (!match(Shr->getOperand(1), m_APInt(ShiftAmtC)))
2547 return nullptr;
2548
2549 // Check that the shift amount is in range. If not, don't perform undefined
2550 // shifts. When the shift is visited it will be simplified.
2551 unsigned TypeBits = C.getBitWidth();
2552 unsigned ShAmtVal = ShiftAmtC->getLimitedValue(TypeBits);
2553 if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2554 return nullptr;
2555
2556 bool IsExact = Shr->isExact();
2557 Type *ShrTy = Shr->getType();
2558 // TODO: If we could guarantee that InstSimplify would handle all of the
2559 // constant-value-based preconditions in the folds below, then we could assert
2560 // those conditions rather than checking them. This is difficult because of
2561 // undef/poison (PR34838).
2562 if (IsAShr && Shr->hasOneUse()) {
2563 if (IsExact && (Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_ULT) &&
2564 (C - 1).isPowerOf2() && C.countLeadingZeros() > ShAmtVal) {
2565 // When C - 1 is a power of two and the transform can be legally
2566 // performed, prefer this form so the produced constant is close to a
2567 // power of two.
2568 // icmp slt/ult (ashr exact X, ShAmtC), C
2569 // --> icmp slt/ult X, (C - 1) << ShAmtC) + 1
2570 APInt ShiftedC = (C - 1).shl(ShAmtVal) + 1;
2571 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2572 }
2573 if (IsExact || Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_ULT) {
2574 // When ShAmtC can be shifted losslessly:
2575 // icmp PRED (ashr exact X, ShAmtC), C --> icmp PRED X, (C << ShAmtC)
2576 // icmp slt/ult (ashr X, ShAmtC), C --> icmp slt/ult X, (C << ShAmtC)
2577 APInt ShiftedC = C.shl(ShAmtVal);
2578 if (ShiftedC.ashr(ShAmtVal) == C)
2579 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2580 }
2581 if (Pred == CmpInst::ICMP_SGT) {
2582 // icmp sgt (ashr X, ShAmtC), C --> icmp sgt X, ((C + 1) << ShAmtC) - 1
2583 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2584 if (!C.isMaxSignedValue() && !(C + 1).shl(ShAmtVal).isMinSignedValue() &&
2585 (ShiftedC + 1).ashr(ShAmtVal) == (C + 1))
2586 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2587 }
2588 if (Pred == CmpInst::ICMP_UGT) {
2589 // icmp ugt (ashr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2590 // 'C + 1 << ShAmtC' can overflow as a signed number, so the 2nd
2591 // clause accounts for that pattern.
2592 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2593 if ((ShiftedC + 1).ashr(ShAmtVal) == (C + 1) ||
2594 (C + 1).shl(ShAmtVal).isMinSignedValue())
2595 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2596 }
2597
2598 // If the compare constant has significant bits above the lowest sign-bit,
2599 // then convert an unsigned cmp to a test of the sign-bit:
2600 // (ashr X, ShiftC) u> C --> X s< 0
2601 // (ashr X, ShiftC) u< C --> X s> -1
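// e.g. for i8: (X s>> 6) u> 2 --> X s< 0; nonnegative X yields 0 or 1 here,
// while negative X yields -1 or -2, which are large as unsigned values.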
2602 if (C.getBitWidth() > 2 && C.getNumSignBits() <= ShAmtVal) {
2603 if (Pred == CmpInst::ICMP_UGT) {
2604 return new ICmpInst(CmpInst::ICMP_SLT, X,
2605 ConstantInt::getNullValue(ShrTy));
2606 }
2607 if (Pred == CmpInst::ICMP_ULT) {
2608 return new ICmpInst(CmpInst::ICMP_SGT, X,
2609 ConstantInt::getAllOnesValue(ShrTy));
2610 }
2611 }
2612 } else if (!IsAShr) {
2613 if (Pred == CmpInst::ICMP_ULT || (Pred == CmpInst::ICMP_UGT && IsExact)) {
2614 // icmp ult (lshr X, ShAmtC), C --> icmp ult X, (C << ShAmtC)
2615 // icmp ugt (lshr exact X, ShAmtC), C --> icmp ugt X, (C << ShAmtC)
2616 APInt ShiftedC = C.shl(ShAmtVal);
2617 if (ShiftedC.lshr(ShAmtVal) == C)
2618 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2619 }
2620 if (Pred == CmpInst::ICMP_UGT) {
2621 // icmp ugt (lshr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2622 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2623 if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1))
2624 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2625 }
2626 }
2627
2628 if (!Cmp.isEquality())
2629 return nullptr;
2630
2631 // Handle equality comparisons of shift-by-constant.
2632
2633 // If the comparison constant changes with the shift, the comparison cannot
2634 // succeed (bits of the comparison constant cannot match the shifted value).
2635 // This should be known by InstSimplify and already be folded to true/false.
2636 assert(((IsAShr && C.shl(ShAmtVal).ashr(ShAmtVal) == C) ||
2637 (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) &&
2638 "Expected icmp+shr simplify did not occur.");
2639
2640 // If the bits shifted out are known zero, compare the unshifted value:
2641 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
2642 if (Shr->isExact())
2643 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, C << ShAmtVal));
2644
2645 if (Shr->hasOneUse()) {
2646 // Canonicalize the shift into an 'and':
2647 // icmp eq/ne (shr X, ShAmt), C --> icmp eq/ne (and X, HiMask), (C << ShAmt)
2648 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
2649 Constant *Mask = ConstantInt::get(ShrTy, Val);
2650 Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask");
2651 return new ICmpInst(Pred, And, ConstantInt::get(ShrTy, C << ShAmtVal));
2652 }
2653
2654 return nullptr;
2655}
2656
2657 Instruction *InstCombinerImpl::foldICmpSRemConstant(ICmpInst &Cmp,
2658 BinaryOperator *SRem,
2659 const APInt &C) {
2660 const ICmpInst::Predicate Pred = Cmp.getPredicate();
2661 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT) {
2662 // Canonicalize unsigned predicates to signed:
2663 // (X s% DivisorC) u> C -> (X s% DivisorC) s< 0
2664 // iff (C s< 0 ? ~C : C) u>= abs(DivisorC)-1
2665 // (X s% DivisorC) u< C+1 -> (X s% DivisorC) s> -1
2666 // iff (C+1 s< 0 ? ~C : C) u>= abs(DivisorC)-1
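// e.g. for i8: (X s% 3) u> 2 --> (X s% 3) s< 0, since the remainder lies in
// [-2, 2] and only its negative values compare u> 2.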
2667
2668 const APInt *DivisorC;
2669 if (!match(SRem->getOperand(1), m_APInt(DivisorC)))
2670 return nullptr;
2671
2672 APInt NormalizedC = C;
2673 if (Pred == ICmpInst::ICMP_ULT) {
2674 assert(!NormalizedC.isZero() &&
2675 "ult X, 0 should have been simplified already.");
2676 --NormalizedC;
2677 }
2678 if (C.isNegative())
2679 NormalizedC.flipAllBits();
2680 assert(!DivisorC->isZero() &&
2681 "srem X, 0 should have been simplified already.");
2682 if (!NormalizedC.uge(DivisorC->abs() - 1))
2683 return nullptr;
2684
2685 Type *Ty = SRem->getType();
2686 if (Pred == ICmpInst::ICMP_UGT)
2688 ConstantInt::getNullValue(Ty));
2689 return new ICmpInst(ICmpInst::ICMP_SGT, SRem,
2690 ConstantInt::getAllOnesValue(Ty));
2691 }
2691 }
2692 // Match an 'is positive' or 'is negative' comparison of remainder by a
2693 // constant power-of-2 value:
2694 // (X % pow2C) sgt/slt 0
2695 if (Pred != ICmpInst::ICMP_SGT && Pred != ICmpInst::ICMP_SLT &&
2696 Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
2697 return nullptr;
2698
2699 // TODO: The one-use check is standard because we do not typically want to
2701 // create longer instruction sequences, but this might be a special case
2701 // because srem is not good for analysis or codegen.
2702 if (!SRem->hasOneUse())
2703 return nullptr;
2704
2705 const APInt *DivisorC;
2706 if (!match(SRem->getOperand(1), m_Power2(DivisorC)))
2707 return nullptr;
2708
2709 // For cmp_sgt/cmp_slt only zero valued C is handled.
2710 // For cmp_eq/cmp_ne only positive valued C is handled.
2711 if (((Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT) &&
2712 !C.isZero()) ||
2713 ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2714 !C.isStrictlyPositive()))
2715 return nullptr;
2716
2717 // Mask off the sign bit and the modulo bits (low-bits).
2718 Type *Ty = SRem->getType();
2719 APInt SignMask = APInt::getSignMask(Ty->getScalarSizeInBits());
2720 Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
2721 Value *And = Builder.CreateAnd(SRem->getOperand(0), MaskC);
2722
2723 if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE)
2724 return new ICmpInst(Pred, And, ConstantInt::get(Ty, C));
2725
2726 // For 'is positive?' check that the sign-bit is clear and at least 1 masked
2727 // bit is set. Example:
2728 // (i8 X % 32) s> 0 --> (X & 159) s> 0
2729 if (Pred == ICmpInst::ICMP_SGT)
2730 return new ICmpInst(ICmpInst::ICMP_SGT, And, ConstantInt::getNullValue(Ty));
2731
2732 // For 'is negative?' check that the sign-bit is set and at least 1 masked
2733 // bit is set. Example:
2734 // (i16 X % 4) s< 0 --> (X & 32771) u> 32768
2735 return new ICmpInst(ICmpInst::ICMP_UGT, And, ConstantInt::get(Ty, SignMask));
2736}
2737
2738/// Fold icmp (udiv X, Y), C.
2739 Instruction *InstCombinerImpl::foldICmpUDivConstant(ICmpInst &Cmp,
2740 BinaryOperator *UDiv,
2741 const APInt &C) {
2742 ICmpInst::Predicate Pred = Cmp.getPredicate();
2743 Value *X = UDiv->getOperand(0);
2744 Value *Y = UDiv->getOperand(1);
2745 Type *Ty = UDiv->getType();
2746
2747 const APInt *C2;
2748 if (!match(X, m_APInt(C2)))
2749 return nullptr;
2750
2751 assert(*C2 != 0 && "udiv 0, X should have been simplified already.");
2752
2753 // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
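// e.g. (64 u/ Y) u> 7 --> Y u<= 8, since 64/(7+1) == 8.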
2754 if (Pred == ICmpInst::ICMP_UGT) {
2755 assert(!C.isMaxValue() &&
2756 "icmp ugt X, UINT_MAX should have been simplified already.");
2757 return new ICmpInst(ICmpInst::ICMP_ULE, Y,
2758 ConstantInt::get(Ty, C2->udiv(C + 1)));
2759 }
2760
2761 // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
2762 if (Pred == ICmpInst::ICMP_ULT) {
2763 assert(C != 0 && "icmp ult X, 0 should have been simplified already.");
2764 return new ICmpInst(ICmpInst::ICMP_UGT, Y,
2765 ConstantInt::get(Ty, C2->udiv(C)));
2766 }
2767
2768 return nullptr;
2769}
2770
2771/// Fold icmp ({su}div X, Y), C.
2772 Instruction *InstCombinerImpl::foldICmpDivConstant(ICmpInst &Cmp,
2773 BinaryOperator *Div,
2774 const APInt &C) {
2775 ICmpInst::Predicate Pred = Cmp.getPredicate();
2776 Value *X = Div->getOperand(0);
2777 Value *Y = Div->getOperand(1);
2778 Type *Ty = Div->getType();
2779 bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
2780
2781 // If unsigned division and the compare constant is bigger than
2782 // UMAX/2 (negative), there's only one pair of values that satisfies an
2783 // equality check, so eliminate the division:
2784 // (X u/ Y) == C --> (X == C) && (Y == 1)
2785 // (X u/ Y) != C --> (X != C) || (Y != 1)
2786 // Similarly, if signed division and the compare constant is exactly SMIN:
2787 // (X s/ Y) == SMIN --> (X == SMIN) && (Y == 1)
2788 // (X s/ Y) != SMIN --> (X != SMIN) || (Y != 1)
2789 if (Cmp.isEquality() && Div->hasOneUse() && C.isSignBitSet() &&
2790 (!DivIsSigned || C.isMinSignedValue())) {
2791 Value *XBig = Builder.CreateICmp(Pred, X, ConstantInt::get(Ty, C));
2792 Value *YOne = Builder.CreateICmp(Pred, Y, ConstantInt::get(Ty, 1));
2793 auto Logic = Pred == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
2794 return BinaryOperator::Create(Logic, XBig, YOne);
2795 }
2796
2797 // Fold: icmp pred ([us]div X, C2), C -> range test
2798 // Fold this div into the comparison, producing a range check.
2799 // Determine, based on the divide type, what the range is being
2800 // checked. If there is an overflow on the low or high side, remember
2801 // it, otherwise compute the range [low, hi) bounding the new value.
2802 // See: InsertRangeTest above for the kinds of replacements possible.
2803 const APInt *C2;
2804 if (!match(Y, m_APInt(C2)))
2805 return nullptr;
2806
2807 // FIXME: If the operand types don't match the type of the divide
2808 // then don't attempt this transform. The code below doesn't have the
2809 // logic to deal with a signed divide and an unsigned compare (and
2810 // vice versa). This is because (x /s C2) <s C produces different
2811 // results than (x /s C2) <u C or (x /u C2) <s C or even
2812 // (x /u C2) <u C. Simply casting the operands and result won't
2813 // work. :( The if statement below tests that condition and bails
2814 // if it finds it.
2815 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2816 return nullptr;
2817
2818 // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
2819 // INT_MIN will also fail if the divisor is 1. Although folds of all these
2820 // division-by-constant cases should be present, we can not assert that they
2821 // have happened before we reach this icmp instruction.
2822 if (C2->isZero() || C2->isOne() || (DivIsSigned && C2->isAllOnes()))
2823 return nullptr;
2824
2825 // Compute Prod = C * C2. We are essentially solving an equation of
2826 // form X / C2 = C. We solve for X by multiplying C2 and C.
2827 // By solving for X, we can turn this into a range check instead of computing
2828 // a divide.
2829 APInt Prod = C * *C2;
2830
2831 // Determine if the product overflows by seeing if the product is not equal to
2832 // the divide. Make sure we do the same kind of divide as in the LHS
2833 // instruction that we're folding.
2834 bool ProdOV = (DivIsSigned ? Prod.sdiv(*C2) : Prod.udiv(*C2)) != C;
2835
2836 // If the division is known to be exact, then there is no remainder from the
2837 // divide, so the covered range size is unit, otherwise it is the divisor.
2838 APInt RangeSize = Div->isExact() ? APInt(C2->getBitWidth(), 1) : *C2;
2839
2840 // Figure out the interval that is being checked. For example, a comparison
2841 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2842 // Compute this interval based on the constants involved and the signedness of
2843 // the compare/divide. This computes a half-open interval, keeping track of
2844 // whether either value in the interval overflows. After analysis each
2845 // overflow variable is set to 0 if its corresponding bound variable is valid,
2846 // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
2847 int LoOverflow = 0, HiOverflow = 0;
2848 APInt LoBound, HiBound;
2849
2850 if (!DivIsSigned) { // udiv
2851 // e.g. X/5 op 3 --> [15, 20)
2852 LoBound = Prod;
2853 HiOverflow = LoOverflow = ProdOV;
2854 if (!HiOverflow) {
2855 // If this is not an exact divide, then many values in the range collapse
2856 // to the same result value.
2857 HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
2858 }
2859 } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
2860 if (C.isZero()) { // (X / pos) op 0
2861 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
2862 LoBound = -(RangeSize - 1);
2863 HiBound = RangeSize;
2864 } else if (C.isStrictlyPositive()) { // (X / pos) op pos
2865 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
2866 HiOverflow = LoOverflow = ProdOV;
2867 if (!HiOverflow)
2868 HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
2869 } else { // (X / pos) op neg
2870 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
2871 HiBound = Prod + 1;
2872 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2873 if (!LoOverflow) {
2874 APInt DivNeg = -RangeSize;
2875 LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
2876 }
2877 }
2878 } else if (C2->isNegative()) { // Divisor is < 0.
2879 if (Div->isExact())
2880 RangeSize.negate();
2881 if (C.isZero()) { // (X / neg) op 0
2882 // e.g. X/-5 op 0 --> [-4, 5)
2883 LoBound = RangeSize + 1;
2884 HiBound = -RangeSize;
2885 if (HiBound == *C2) { // -INTMIN = INTMIN
2886 HiOverflow = 1; // [INTMIN+1, overflow)
2887 HiBound = APInt(); // e.g. X/INTMIN = 0 --> X > INTMIN
2888 }
2889 } else if (C.isStrictlyPositive()) { // (X / neg) op pos
2890 // e.g. X/-5 op 3 --> [-19, -14)
2891 HiBound = Prod + 1;
2892 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2893 if (!LoOverflow)
2894 LoOverflow =
2895 addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1 : 0;
2896 } else { // (X / neg) op neg
2897 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
2898 LoOverflow = HiOverflow = ProdOV;
2899 if (!HiOverflow)
2900 HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
2901 }
2902
2903 // Dividing by a negative swaps the condition. LT <-> GT
2904 Pred = ICmpInst::getSwappedPredicate(Pred);
2905 }
2906
2907 switch (Pred) {
2908 default:
2909 llvm_unreachable("Unhandled icmp predicate!");
2910 case ICmpInst::ICMP_EQ:
2911 if (LoOverflow && HiOverflow)
2912 return replaceInstUsesWith(Cmp, Builder.getFalse());
2913 if (HiOverflow)
2914 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE,
2915 X, ConstantInt::get(Ty, LoBound));
2916 if (LoOverflow)
2917 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
2918 X, ConstantInt::get(Ty, HiBound));
2919 return replaceInstUsesWith(
2920 Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, true));
2921 case ICmpInst::ICMP_NE:
2922 if (LoOverflow && HiOverflow)
2923 return replaceInstUsesWith(Cmp, Builder.getTrue());
2924 if (HiOverflow)
2925 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
2926 X, ConstantInt::get(Ty, LoBound));
2927 if (LoOverflow)
2928 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE,
2929 X, ConstantInt::get(Ty, HiBound));
2930 return replaceInstUsesWith(
2931 Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, false));
2932 case ICmpInst::ICMP_ULT:
2933 case ICmpInst::ICMP_SLT:
2934 if (LoOverflow == +1) // Low bound is greater than input range.
2935 return replaceInstUsesWith(Cmp, Builder.getTrue());
2936 if (LoOverflow == -1) // Low bound is less than input range.
2937 return replaceInstUsesWith(Cmp, Builder.getFalse());
2938 return new ICmpInst(Pred, X, ConstantInt::get(Ty, LoBound));
2939 case ICmpInst::ICMP_UGT:
2940 case ICmpInst::ICMP_SGT:
2941 if (HiOverflow == +1) // High bound greater than input range.
2942 return replaceInstUsesWith(Cmp, Builder.getFalse());
2943 if (HiOverflow == -1) // High bound less than input range.
2944 return replaceInstUsesWith(Cmp, Builder.getTrue());
2945 if (Pred == ICmpInst::ICMP_UGT)
2946 return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, HiBound));
2947 return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, HiBound));
2948 }
2949
2950 return nullptr;
2951}
2952
2953/// Fold icmp (sub X, Y), C.
2954 Instruction *InstCombinerImpl::foldICmpSubConstant(ICmpInst &Cmp,
2955 BinaryOperator *Sub,
2956 const APInt &C) {
2957 Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
2958 ICmpInst::Predicate Pred = Cmp.getPredicate();
2959 Type *Ty = Sub->getType();
2960
2961 // (SubC - Y) == C --> Y == (SubC - C)
2962 // (SubC - Y) != C --> Y != (SubC - C)
2963 Constant *SubC;
2964 if (Cmp.isEquality() && match(X, m_ImmConstant(SubC))) {
2965 return new ICmpInst(Pred, Y,
2966 ConstantExpr::getSub(SubC, ConstantInt::get(Ty, C)));
2967 }
2968
2969 // (icmp P (sub nuw|nsw C2, Y), C) -> (icmp swap(P) Y, C2-C)
2970 const APInt *C2;
2971 APInt SubResult;
2972 ICmpInst::Predicate SwappedPred = Cmp.getSwappedPredicate();
2973 bool HasNSW = Sub->hasNoSignedWrap();
2974 bool HasNUW = Sub->hasNoUnsignedWrap();
2975 if (match(X, m_APInt(C2)) &&
2976 ((Cmp.isUnsigned() && HasNUW) || (Cmp.isSigned() && HasNSW)) &&
2977 !subWithOverflow(SubResult, *C2, C, Cmp.isSigned()))
2978 return new ICmpInst(SwappedPred, Y, ConstantInt::get(Ty, SubResult));
2979
2980 // X - Y == 0 --> X == Y.
2981 // X - Y != 0 --> X != Y.
2982 // TODO: We allow this with multiple uses as long as the other uses are not
2983 // in phis. The phi use check is guarding against a codegen regression
2984 // for a loop test. If the backend could undo this (and possibly
2985 // subsequent transforms), we would not need this hack.
2986 if (Cmp.isEquality() && C.isZero() &&
2987 none_of((Sub->users()), [](const User *U) { return isa<PHINode>(U); }))
2988 return new ICmpInst(Pred, X, Y);
2989
2990 // The following transforms are only worth it if the only user of the subtract
2991 // is the icmp.
2992 // TODO: This is an artificial restriction for all of the transforms below
2993 // that only need a single replacement icmp. Can these use the phi test
2994 // like the transform above here?
2995 if (!Sub->hasOneUse())
2996 return nullptr;
2997
2998 if (Sub->hasNoSignedWrap()) {
2999 // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
3000 if (Pred == ICmpInst::ICMP_SGT && C.isAllOnes())
3001 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
3002
3003 // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
3004 if (Pred == ICmpInst::ICMP_SGT && C.isZero())
3005 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
3006
3007 // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
3008 if (Pred == ICmpInst::ICMP_SLT && C.isZero())
3009 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
3010
3011 // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
3012 if (Pred == ICmpInst::ICMP_SLT && C.isOne())
3013 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
3014 }
3015
3016 if (!match(X, m_APInt(C2)))
3017 return nullptr;
3018
3019 // C2 - Y <u C -> (Y | (C - 1)) == C2
3020 // iff (C2 & (C - 1)) == C - 1 and C is a power of 2
3021 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() &&
3022 (*C2 & (C - 1)) == (C - 1))
3023 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, C - 1), X);
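// Illustrative instance of the fold above (editorial sketch; %y and the i8
// width are hypothetical): icmp ult (sub i8 7, %y), 4
// --> icmp eq (or i8 %y, 3), 7, since 7 & (4 - 1) == 3 and 4 is a power of 2.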
3024
3025 // C2 - Y >u C -> (Y | C) != C2
3026 // iff C2 & C == C and C + 1 is a power of 2
3027 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == C)
3028 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, C), X);
3029
3030 // We have handled the special cases that reduce; canonicalize any
3031 // remaining sub to add as:
3032 // (C2 - Y) > C --> (Y + ~C2) < ~C
3033 Value *Add = Builder.CreateAdd(Y, ConstantInt::get(Ty, ~(*C2)), "notsub",
3034 HasNUW, HasNSW);
3035 return new ICmpInst(SwappedPred, Add, ConstantInt::get(Ty, ~C));
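// Illustrative instance of the canonicalization above (editorial sketch,
// hypothetical i8 values): icmp sgt (sub i8 5, %y), 3
// --> icmp slt (add i8 %y, -6), -4, using ~5 == -6 and ~3 == -4.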
3036}
3037
3038static Value *createLogicFromTable(const std::bitset<4> &Table, Value *Op0,
3039 Value *Op1, IRBuilderBase &Builder,
3040 bool HasOneUse) {
3041 auto FoldConstant = [&](bool Val) {
3042 Constant *Res = Val ? Builder.getTrue() : Builder.getFalse();
3043 if (Op0->getType()->isVectorTy())
3044 Res = ConstantVector::getSplat(
3045 cast<VectorType>(Op0->getType())->getElementCount(), Res);
3046 return Res;
3047 };
3048
3049 switch (Table.to_ulong()) {
3050 case 0: // 0 0 0 0
3051 return FoldConstant(false);
3052 case 1: // 0 0 0 1
3053 return HasOneUse ? Builder.CreateNot(Builder.CreateOr(Op0, Op1)) : nullptr;
3054 case 2: // 0 0 1 0
3055 return HasOneUse ? Builder.CreateAnd(Builder.CreateNot(Op0), Op1) : nullptr;
3056 case 3: // 0 0 1 1
3057 return Builder.CreateNot(Op0);
3058 case 4: // 0 1 0 0
3059 return HasOneUse ? Builder.CreateAnd(Op0, Builder.CreateNot(Op1)) : nullptr;
3060 case 5: // 0 1 0 1
3061 return Builder.CreateNot(Op1);
3062 case 6: // 0 1 1 0
3063 return Builder.CreateXor(Op0, Op1);
3064 case 7: // 0 1 1 1
3065 return HasOneUse ? Builder.CreateNot(Builder.CreateAnd(Op0, Op1)) : nullptr;
3066 case 8: // 1 0 0 0
3067 return Builder.CreateAnd(Op0, Op1);
3068 case 9: // 1 0 0 1
3069 return HasOneUse ? Builder.CreateNot(Builder.CreateXor(Op0, Op1)) : nullptr;
3070 case 10: // 1 0 1 0
3071 return Op1;
3072 case 11: // 1 0 1 1
3073 return HasOneUse ? Builder.CreateOr(Builder.CreateNot(Op0), Op1) : nullptr;
3074 case 12: // 1 1 0 0
3075 return Op0;
3076 case 13: // 1 1 0 1
3077 return HasOneUse ? Builder.CreateOr(Op0, Builder.CreateNot(Op1)) : nullptr;
3078 case 14: // 1 1 1 0
3079 return Builder.CreateOr(Op0, Op1);
3080 case 15: // 1 1 1 1
3081 return FoldConstant(true);
3082 default:
3083 llvm_unreachable("Invalid Operation");
3084 }
3085 return nullptr;
3086}
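// (Editorial note on the encoding: bit I of Table holds the result for
// (Op0, Op1) == ((I >> 1) & 1, I & 1), so Table == 6 (binary 0110) means
// "true iff Op0 != Op1", which is why case 6 emits an xor.)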
3087
3088 Instruction *InstCombinerImpl::foldICmpBinOpWithConstantViaTruthTable(
3089 ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) {
3090 Value *A, *B;
3091 Constant *C1, *C2, *C3, *C4;
3092 if (!(match(BO->getOperand(0),
3093 m_Select(m_Value(A), m_Constant(C1), m_Constant(C2)))) ||
3094 !match(BO->getOperand(1),
3095 m_Select(m_Value(B), m_Constant(C3), m_Constant(C4))) ||
3096 Cmp.getType() != A->getType())
3097 return nullptr;
3098
3099 std::bitset<4> Table;
3100 auto ComputeTable = [&](bool First, bool Second) -> std::optional<bool> {
3101 Constant *L = First ? C1 : C2;
3102 Constant *R = Second ? C3 : C4;
3103 if (auto *Res = ConstantFoldBinaryOpOperands(BO->getOpcode(), L, R, DL)) {
3104 auto *Val = Res->getType()->isVectorTy() ? Res->getSplatValue() : Res;
3105 if (auto *CI = dyn_cast_or_null<ConstantInt>(Val))
3106 return ICmpInst::compare(CI->getValue(), C, Cmp.getPredicate());
3107 }
3108 return std::nullopt;
3109 };
3110
3111 for (unsigned I = 0; I < 4; ++I) {
3112 bool First = (I >> 1) & 1;
3113 bool Second = I & 1;
3114 if (auto Res = ComputeTable(First, Second))
3115 Table[I] = *Res;
3116 else
3117 return nullptr;
3118 }
3119
3120 // Synthesize optimal logic.
3121 if (auto *Cond = createLogicFromTable(Table, A, B, Builder, BO->hasOneUse()))
3122 return replaceInstUsesWith(Cmp, Cond);
3123 return nullptr;
3124}
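// Illustrative instance (editorial sketch, hypothetical values):
// icmp eq (add (select %a, i8 0, i8 2), (select %b, i8 0, i8 1)), 0
// is true only for (%a, %b) == (true, true), i.e. Table == 8, and therefore
// folds to: and i1 %a, %b.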
3125
3126/// Fold icmp (add X, Y), C.
3127 Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
3128 BinaryOperator *Add,
3129 const APInt &C) {
3130 Value *Y = Add->getOperand(1);
3131 Value *X = Add->getOperand(0);
3132
3133 Value *Op0, *Op1;
3134 Instruction *Ext0, *Ext1;
3135 const CmpInst::Predicate Pred = Cmp.getPredicate();
3136 if (match(Add,
3137 m_Add(m_CombineAnd(m_Instruction(Ext0), m_ZExtOrSExt(m_Value(Op0))),
3138 m_CombineAnd(m_Instruction(Ext1),
3139 m_ZExtOrSExt(m_Value(Op1))))) &&
3140 Op0->getType()->isIntOrIntVectorTy(1) &&
3141 Op1->getType()->isIntOrIntVectorTy(1)) {
3142 unsigned BW = C.getBitWidth();
3143 std::bitset<4> Table;
3144 auto ComputeTable = [&](bool Op0Val, bool Op1Val) {
3145 APInt Res(BW, 0);
3146 if (Op0Val)
3147 Res += APInt(BW, isa<ZExtInst>(Ext0) ? 1 : -1, /*isSigned=*/true);
3148 if (Op1Val)
3149 Res += APInt(BW, isa<ZExtInst>(Ext1) ? 1 : -1, /*isSigned=*/true);
3150 return ICmpInst::compare(Res, C, Pred);
3151 };
3152
3153 Table[0] = ComputeTable(false, false);
3154 Table[1] = ComputeTable(false, true);
3155 Table[2] = ComputeTable(true, false);
3156 Table[3] = ComputeTable(true, true);
3157 if (auto *Cond =
3158 createLogicFromTable(Table, Op0, Op1, Builder, Add->hasOneUse()))
3159 return replaceInstUsesWith(Cmp, Cond);
3160 }
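// Illustrative instance (editorial sketch, hypothetical values): for
// icmp eq (add (zext i1 %a to i8), (sext i1 %b to i8)), 0 the sum is zero
// exactly when %a == %b, so (given a single use of the add) it folds to
// xor i1 %a, %b followed by a not.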
3161 const APInt *C2;
3162 if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
3163 return nullptr;
3164
3165 // Fold icmp pred (add X, C2), C.
3166 Type *Ty = Add->getType();
3167
3168 // If the add does not wrap, we can always adjust the compare by subtracting
3169 // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
3170 // are canonicalized to SGT/SLT/UGT/ULT.
3171 if ((Add->hasNoSignedWrap() &&
3172 (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) ||
3173 (Add->hasNoUnsignedWrap() &&
3174 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT))) {
3175 bool Overflow;
3176 APInt NewC =
3177 Cmp.isSigned() ? C.ssub_ov(*C2, Overflow) : C.usub_ov(*C2, Overflow);
3178 // If there is overflow, the result must be true or false.
3179 // TODO: Can we assert there is no overflow because InstSimplify always
3180 // handles those cases?
3181 if (!Overflow)
3182 // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
3183 return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
3184 }
3185
3186 if (ICmpInst::isUnsigned(Pred) && Add->hasNoSignedWrap() &&
3187 C.isNonNegative() && (C - *C2).isNonNegative() &&
3188 computeConstantRange(X, /*ForSigned=*/true).add(*C2).isAllNonNegative())
3189 return new ICmpInst(ICmpInst::getSignedPredicate(Pred), X,
3190 ConstantInt::get(Ty, C - *C2));
3191
3192 auto CR = ConstantRange::makeExactICmpRegion(Pred, C).subtract(*C2);
3193 const APInt &Upper = CR.getUpper();
3194 const APInt &Lower = CR.getLower();
3195 if (Cmp.isSigned()) {
3196 if (Lower.isSignMask())
3197 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
3198 if (Upper.isSignMask())
3199 return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
3200 } else {
3201 if (Lower.isMinValue())
3202 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
3203 if (Upper.isMinValue())
3204 return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
3205 }
3206
3207 // This set of folds is intentionally placed after folds that use no-wrapping
3208 // flags because those folds are likely better for later analysis/codegen.
3209 const APInt SMax = APInt::getSignedMaxValue(Ty->getScalarSizeInBits());
3210 const APInt SMin = APInt::getSignedMinValue(Ty->getScalarSizeInBits());
3211
3212 // Fold compare with offset to opposite sign compare if it eliminates offset:
3213 // (X + C2) >u C --> X <s -C2 (if C == C2 + SMAX)
3214 if (Pred == CmpInst::ICMP_UGT && C == *C2 + SMax)
3215 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, -(*C2)));
3216
3217 // (X + C2) <u C --> X >s ~C2 (if C == C2 + SMIN)
3218 if (Pred == CmpInst::ICMP_ULT && C == *C2 + SMin)
3219 return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantInt::get(Ty, ~(*C2)));
3220
3221 // (X + C2) >s C --> X <u (SMAX - C) (if C == C2 - 1)
3222 if (Pred == CmpInst::ICMP_SGT && C == *C2 - 1)
3223 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, SMax - C));
3224
3225 // (X + C2) <s C --> X >u (C ^ SMAX) (if C == C2)
3226 if (Pred == CmpInst::ICMP_SLT && C == *C2)
3227 return new ICmpInst(ICmpInst::ICMP_UGT, X, ConstantInt::get(Ty, C ^ SMax));
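// Illustrative instance of the first fold above (editorial sketch,
// hypothetical i8 values): with C2 == 1 and C == 1 + SMAX == 128:
// icmp ugt (add i8 %x, 1), -128 --> icmp slt i8 %x, -1.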
3228
3229 // (X + -1) <u C --> X <=u C (if X is never null)
3230 if (Pred == CmpInst::ICMP_ULT && C2->isAllOnes()) {
3231 const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
3232 if (llvm::isKnownNonZero(X, Q))
3233 return new ICmpInst(ICmpInst::ICMP_ULE, X, ConstantInt::get(Ty, C));
3234 }
3235
3236 if (!Add->hasOneUse())
3237 return nullptr;
3238
3239 // X+C2 <u C -> (X & -C) == -C2
3240 // iff C2 & (C-1) == 0
3241 // C is a power of 2
3242 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && (*C2 & (C - 1)) == 0)
3243 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -C),
3244 ConstantInt::get(Ty, -(*C2)));
3245
3246 // X+C2 <u C -> (X & C) != 2C
3247 // iff C == -(C2)
3248 // C2 is a power of 2
3249 if (Pred == ICmpInst::ICMP_ULT && C2->isPowerOf2() && C == -*C2)
3250 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, C),
3251 ConstantInt::get(Ty, C * 2));
3252
3253 // X+C2 >u C -> (X & ~C) != -C2
3254 // iff C2 & C == 0
3255 // C+1 is a power of 2
3256 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == 0)
3257 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~C),
3258 ConstantInt::get(Ty, -(*C2)));
3259
3260 // The range test idiom can use either ult or ugt. Arbitrarily canonicalize
3261 // to the ult form.
3262 // X+C2 >u C -> X+(C2-C-1) <u ~C
3263 if (Pred == ICmpInst::ICMP_UGT)
3264 return new ICmpInst(ICmpInst::ICMP_ULT,
3265 Builder.CreateAdd(X, ConstantInt::get(Ty, *C2 - C - 1)),
3266 ConstantInt::get(Ty, ~C));
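// Illustrative instance (editorial sketch, hypothetical i8 values):
// icmp ugt (add i8 %x, 10), 20 --> icmp ult (add i8 %x, -11), -21,
// since 10 - 20 - 1 == -11 and ~20 == -21.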
3267
3268 // zext(V) + C2 pred C -> V + C3 pred' C4
3269 Value *V;
3270 if (match(X, m_ZExt(m_Value(V)))) {
3271 Type *NewCmpTy = V->getType();
3272 unsigned NewCmpBW = NewCmpTy->getScalarSizeInBits();
3273 if (shouldChangeType(Ty, NewCmpTy)) {
3274 ConstantRange SrcCR = CR.truncate(NewCmpBW, TruncInst::NoUnsignedWrap);
3275 CmpInst::Predicate EquivPred;
3276 APInt EquivInt;
3277 APInt EquivOffset;
3278
3279 SrcCR.getEquivalentICmp(EquivPred, EquivInt, EquivOffset);
3280 return new ICmpInst(
3281 EquivPred,
3282 EquivOffset.isZero()
3283 ? V
3284 : Builder.CreateAdd(V, ConstantInt::get(NewCmpTy, EquivOffset)),
3285 ConstantInt::get(NewCmpTy, EquivInt));
3286 }
3287 }
3288
3289 return nullptr;
3290}
3291
3292 bool InstCombinerImpl::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS,
3293 Value *&RHS, ConstantInt *&Less,
3294 ConstantInt *&Equal,
3295 ConstantInt *&Greater) {
3296 // TODO: Generalize this to work with other comparison idioms or ensure
3297 // they get canonicalized into this form.
3298
3299 // select i1 (a == b),
3300 // i32 Equal,
3301 // i32 (select i1 (a < b), i32 Less, i32 Greater)
3302 // where Equal, Less and Greater are placeholders for any three constants.
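// An IR sketch of the matched shape (editorial; names are hypothetical):
//   %eq = icmp eq i32 %a, %b
//   %lt = icmp slt i32 %a, %b
//   %inner = select i1 %lt, i32 Less, i32 Greater
//   %outer = select i1 %eq, i32 Equal, i32 %inner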
3303 CmpPredicate PredA;
3304 if (!match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) ||
3305 !ICmpInst::isEquality(PredA))
3306 return false;
3307 Value *EqualVal = SI->getTrueValue();
3308 Value *UnequalVal = SI->getFalseValue();
3310 // We can still get a non-canonical predicate here, so canonicalize.
3310 if (PredA == ICmpInst::ICMP_NE)
3311 std::swap(EqualVal, UnequalVal);
3312 if (!match(EqualVal, m_ConstantInt(Equal)))
3313 return false;
3314 CmpPredicate PredB;
3315 Value *LHS2, *RHS2;
3316 if (!match(UnequalVal, m_Select(m_ICmp(PredB, m_Value(LHS2), m_Value(RHS2)),
3317 m_ConstantInt(Less), m_ConstantInt(Greater))))
3318 return false;
3319 // We can get a predicate mismatch here, so canonicalize if possible:
3320 // First, ensure that 'LHS' matches.
3321 if (LHS2 != LHS) {
3322 // x sgt y <--> y slt x
3323 std::swap(LHS2, RHS2);
3324 PredB = ICmpInst::getSwappedPredicate(PredB);
3325 }
3326 if (LHS2 != LHS)
3327 return false;
3328 // We also need to canonicalize 'RHS'.
3329 if (PredB == ICmpInst::ICMP_SGT && isa<Constant>(RHS2)) {
3330 // x sgt C-1 <--> x sge C <--> not(x slt C)
3331 auto FlippedStrictness =
3332 getFlippedStrictnessPredicateAndConstant(PredB, cast<Constant>(RHS2));
3333 if (!FlippedStrictness)
3334 return false;
3335 assert(FlippedStrictness->first == ICmpInst::ICMP_SGE &&
3336 "basic correctness failure");
3337 RHS2 = FlippedStrictness->second;
3338 // And kind-of perform the result swap.
3339 std::swap(Less, Greater);
3340 PredB = ICmpInst::ICMP_SLT;
3341 }
3342 return PredB == ICmpInst::ICMP_SLT && RHS == RHS2;
3343}
3344
3345 Instruction *InstCombinerImpl::foldICmpSelectConstant(ICmpInst &Cmp,
3346 SelectInst *Select,
3347 ConstantInt *C) {
3348
3349 assert(C && "Cmp RHS should be a constant int!");
3350 // If we're testing a constant value against the result of a three way
3351 // comparison, the result can be expressed directly in terms of the
3352 // original values being compared. Note: We could possibly be more
3353 // aggressive here and remove the hasOneUse test. The original select is
3354 // really likely to simplify or sink when we remove a test of the result.
3355 Value *OrigLHS, *OrigRHS;
3356 ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
3357 if (Cmp.hasOneUse() &&
3358 matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal,
3359 C3GreaterThan)) {
3360 assert(C1LessThan && C2Equal && C3GreaterThan);
3361
3362 bool TrueWhenLessThan = ICmpInst::compare(
3363 C1LessThan->getValue(), C->getValue(), Cmp.getPredicate());
3364 bool TrueWhenEqual = ICmpInst::compare(C2Equal->getValue(), C->getValue(),
3365 Cmp.getPredicate());
3366 bool TrueWhenGreaterThan = ICmpInst::compare(
3367 C3GreaterThan->getValue(), C->getValue(), Cmp.getPredicate());
3368
3369 // This generates the new instruction that will replace the original Cmp
3370 // Instruction. Instead of enumerating the various combinations when
3371 // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus
3372 // false, we rely on chaining of ORs and future passes of InstCombine to
3373 // simplify the OR further (i.e. a s< b || a == b becomes a s<= b).
3374
3375 // When none of the three constants satisfy the predicate for the RHS (C),
3376 // the entire original Cmp can be simplified to a false.
3377 Value *Cond = Builder.getFalse();
3378 if (TrueWhenLessThan)
3379 Cond = Builder.CreateOr(
3380 Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT, OrigLHS, OrigRHS));
3381 if (TrueWhenEqual)
3382 Cond = Builder.CreateOr(
3383 Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ, OrigLHS, OrigRHS));
3384 if (TrueWhenGreaterThan)
3385 Cond = Builder.CreateOr(
3386 Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT, OrigLHS, OrigRHS));
3387
3388 return replaceInstUsesWith(Cmp, Cond);
3389 }
3390 return nullptr;
3391}
3392
3393 Instruction *InstCombinerImpl::foldICmpBitCast(ICmpInst &Cmp) {
3394 auto *Bitcast = dyn_cast<BitCastInst>(Cmp.getOperand(0));
3395 if (!Bitcast)
3396 return nullptr;
3397
3398 ICmpInst::Predicate Pred = Cmp.getPredicate();
3399 Value *Op1 = Cmp.getOperand(1);
3400 Value *BCSrcOp = Bitcast->getOperand(0);
3401 Type *SrcType = Bitcast->getSrcTy();
3402 Type *DstType = Bitcast->getType();
3403
3404 // Make sure the bitcast doesn't change between scalar and vector and
3405 // doesn't change the number of vector elements.
3406 if (SrcType->isVectorTy() == DstType->isVectorTy() &&
3407 SrcType->getScalarSizeInBits() == DstType->getScalarSizeInBits()) {
3408 // Zero-equality and sign-bit checks are preserved through sitofp + bitcast.
3409 Value *X;
3410 if (match(BCSrcOp, m_SIToFP(m_Value(X)))) {
3411 // icmp eq (bitcast (sitofp X)), 0 --> icmp eq X, 0
3412 // icmp ne (bitcast (sitofp X)), 0 --> icmp ne X, 0
3413 // icmp slt (bitcast (sitofp X)), 0 --> icmp slt X, 0
3414 // icmp sgt (bitcast (sitofp X)), 0 --> icmp sgt X, 0
3415 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_SLT ||
3416 Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT) &&
3417 match(Op1, m_Zero()))
3418 return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
3419
3420 // icmp slt (bitcast (sitofp X)), 1 --> icmp slt X, 1
3421 if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_One()))
3422 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), 1));
3423
3424 // icmp sgt (bitcast (sitofp X)), -1 --> icmp sgt X, -1
3425 if (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))
3426 return new ICmpInst(Pred, X,
3427 ConstantInt::getAllOnesValue(X->getType()));
3428 }
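// Illustrative instance (editorial sketch, hypothetical types):
// icmp slt (bitcast (sitofp i32 %x to float) to i32), 0
// --> icmp slt i32 %x, 0, because sitofp preserves the sign bit.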
3429
3430 // Zero-equality checks are preserved through unsigned floating-point casts:
3431 // icmp eq (bitcast (uitofp X)), 0 --> icmp eq X, 0
3432 // icmp ne (bitcast (uitofp X)), 0 --> icmp ne X, 0
3433 if (match(BCSrcOp, m_UIToFP(m_Value(X))))
3434 if (Cmp.isEquality() && match(Op1, m_Zero()))
3435 return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
3436
3437 const APInt *C;
3438 bool TrueIfSigned;
3439 if (match(Op1, m_APInt(C)) && Bitcast->hasOneUse()) {
3440 // If this is a sign-bit test of a bitcast of a casted FP value, eliminate
3441 // the FP extend/truncate because that cast does not change the sign-bit.
3442 // This is true for all standard IEEE-754 types and the X86 80-bit type.
3443 // The sign-bit is always the most significant bit in those types.
3444 if (isSignBitCheck(Pred, *C, TrueIfSigned) &&
3445 (match(BCSrcOp, m_FPExt(m_Value(X))) ||
3446 match(BCSrcOp, m_FPTrunc(m_Value(X))))) {
3447 // (bitcast (fpext/fptrunc X) to iX) < 0 --> (bitcast X to iY) < 0
3448 // (bitcast (fpext/fptrunc X) to iX) > -1 --> (bitcast X to iY) > -1
3449 Type *XType = X->getType();
3450
3451 // We can't currently handle PowerPC-style floating-point operations here.
3452 if (!(XType->isPPC_FP128Ty() || SrcType->isPPC_FP128Ty())) {
3453 Type *NewType = Builder.getIntNTy(XType->getScalarSizeInBits());
3454 if (auto *XVTy = dyn_cast<VectorType>(XType))
3455 NewType = VectorType::get(NewType, XVTy->getElementCount());
3456 Value *NewBitcast = Builder.CreateBitCast(X, NewType);
3457 if (TrueIfSigned)
3458 return new ICmpInst(ICmpInst::ICMP_SLT, NewBitcast,
3459 ConstantInt::getNullValue(NewType));
3460 else
3461 return new ICmpInst(ICmpInst::ICMP_SGT, NewBitcast,
3462 ConstantInt::getAllOnesValue(NewType));
3463 }
3464 }
3465
3466 // icmp eq/ne (bitcast X to int), special fp -> llvm.is.fpclass(X, class)
3467 Type *FPType = SrcType->getScalarType();
3468 if (!Cmp.getParent()->getParent()->hasFnAttribute(
3469 Attribute::NoImplicitFloat) &&
3470 Cmp.isEquality() && FPType->isIEEELikeFPTy()) {
3471 FPClassTest Mask = APFloat(FPType->getFltSemantics(), *C).classify();
3472 if (Mask & (fcInf | fcZero)) {
3473 if (Pred == ICmpInst::ICMP_NE)
3474 Mask = ~Mask;
3475 return replaceInstUsesWith(Cmp,
3476 Builder.createIsFPClass(BCSrcOp, Mask));
3477 }
3478 }
3479 }
3480 }
3481
3482 const APInt *C;
3483 if (!match(Cmp.getOperand(1), m_APInt(C)) || !DstType->isIntegerTy() ||
3484 !SrcType->isIntOrIntVectorTy())
3485 return nullptr;
3486
3487 // If this is checking if all elements of a vector compare are set or not,
3488 // invert the casted vector equality compare and test if all compare
3489 // elements are clear or not. Compare against zero is generally easier for
3490 // analysis and codegen.
3491 // icmp eq/ne (bitcast (not X) to iN), -1 --> icmp eq/ne (bitcast X to iN), 0
3492 // Example: are all elements equal? --> are zero elements not equal?
3493 // TODO: Try harder to reduce compare of 2 freely invertible operands?
3494 if (Cmp.isEquality() && C->isAllOnes() && Bitcast->hasOneUse()) {
3495 if (Value *NotBCSrcOp =
3496 getFreelyInverted(BCSrcOp, BCSrcOp->hasOneUse(), &Builder)) {
3497 Value *Cast = Builder.CreateBitCast(NotBCSrcOp, DstType);
3498 return new ICmpInst(Pred, Cast, ConstantInt::getNullValue(DstType));
3499 }
3500 }
3501
3502 // If this is checking if all elements of an extended vector are clear or not,
3503 // compare in a narrow type to eliminate the extend:
3504 // icmp eq/ne (bitcast (ext X) to iN), 0 --> icmp eq/ne (bitcast X to iM), 0
3505 Value *X;
3506 if (Cmp.isEquality() && C->isZero() && Bitcast->hasOneUse() &&
3507 match(BCSrcOp, m_ZExtOrSExt(m_Value(X)))) {
3508 if (auto *VecTy = dyn_cast<FixedVectorType>(X->getType())) {
3509 Type *NewType = Builder.getIntNTy(VecTy->getPrimitiveSizeInBits());
3510 Value *NewCast = Builder.CreateBitCast(X, NewType);
3511 return new ICmpInst(Pred, NewCast, ConstantInt::getNullValue(NewType));
3512 }
3513 }
3514
3515 // Folding: icmp <pred> iN X, C
3516 // where X = bitcast <M x iK> (shufflevector <M x iK> %vec, undef, SC)) to iN
3517 // and C is a splat of a K-bit pattern
3518 // and SC is a constant vector = <C', C', C', ..., C'>
3519 // Into:
3520 // %E = extractelement <M x iK> %vec, i32 C'
3521 // icmp <pred> iK %E, trunc(C)
3522 Value *Vec;
3523 ArrayRef<int> Mask;
3524 if (match(BCSrcOp, m_Shuffle(m_Value(Vec), m_Undef(), m_Mask(Mask)))) {
3525 // Check whether every element of Mask is the same constant
3526 if (all_equal(Mask)) {
3527 auto *VecTy = cast<VectorType>(SrcType);
3528 auto *EltTy = cast<IntegerType>(VecTy->getElementType());
3529 if (C->isSplat(EltTy->getBitWidth())) {
3530 // Fold the icmp based on the value of C
3531 // If C is M copies of an iK sized bit pattern,
3532 // then:
3533 // => %E = extractelement <M x iK> %vec, i32 C'
3534 // icmp <pred> iK %E, trunc(C)
3535 Value *Elem = Builder.getInt32(Mask[0]);
3536 Value *Extract = Builder.CreateExtractElement(Vec, Elem);
3537 Value *NewC = ConstantInt::get(EltTy, C->trunc(EltTy->getBitWidth()));
3538 return new ICmpInst(Pred, Extract, NewC);
3539 }
3540 }
3541 }
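// Illustrative instance (editorial sketch, hypothetical values):
//   %s = shufflevector <4 x i8> %v, <4 x i8> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
//   icmp eq (bitcast <4 x i8> %s to i32), 707406378 ; 0x2A2A2A2A, a splat of 42
// --> icmp eq (extractelement <4 x i8> %v, i32 1), 42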
3542 return nullptr;
3543}
3544
3545/// Try to fold integer comparisons with a constant operand: icmp Pred X, C
3546/// where X is some kind of instruction.
3547 Instruction *InstCombinerImpl::foldICmpInstWithConstant(ICmpInst &Cmp) {
3548 const APInt *C;
3549
3550 if (match(Cmp.getOperand(1), m_APInt(C))) {
3551 if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0)))
3552 if (Instruction *I = foldICmpBinOpWithConstant(Cmp, BO, *C))
3553 return I;
3554
3555 if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0)))
3556 // For now, we only support constant integers while folding the
3557 // ICMP(SELECT) pattern. We can extend this to support vectors of integers
3558 // similar to the cases handled by binary ops above.
3559 if (auto *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
3560 if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS))
3561 return I;
3562
3563 if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0)))
3564 if (Instruction *I = foldICmpTruncConstant(Cmp, TI, *C))
3565 return I;
3566
3567 if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)))
3568 if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, II, *C))
3569 return I;
3570
3571 // (extractval ([s/u]subo X, Y), 0) == 0 --> X == Y
3572 // (extractval ([s/u]subo X, Y), 0) != 0 --> X != Y
3573 // TODO: This checks one-use, but that is not strictly necessary.
3574 Value *Cmp0 = Cmp.getOperand(0);
3575 Value *X, *Y;
3576 if (C->isZero() && Cmp.isEquality() && Cmp0->hasOneUse() &&
3577 (match(Cmp0,
3578 m_ExtractValue<0>(m_Intrinsic<Intrinsic::ssub_with_overflow>(
3579 m_Value(X), m_Value(Y)))) ||
3580 match(Cmp0,
3581 m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
3582 m_Value(X), m_Value(Y))))))
3583 return new ICmpInst(Cmp.getPredicate(), X, Y);
3584 }
3585
3586 if (match(Cmp.getOperand(1), m_APIntAllowPoison(C)))
3587 return foldICmpInstWithConstantAllowPoison(Cmp, *C);
3588
3589 return nullptr;
3590}
3591
3592/// Fold an icmp equality instruction with binary operator LHS and constant RHS:
3593/// icmp eq/ne BO, C.
3594 Instruction *InstCombinerImpl::foldICmpBinOpEqualityWithConstant(
3595 ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) {
3596 // TODO: Some of these folds could work with arbitrary constants, but this
3597 // function is limited to scalar and vector splat constants.
3598 if (!Cmp.isEquality())
3599 return nullptr;
3600
3601 ICmpInst::Predicate Pred = Cmp.getPredicate();
3602 bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
3603 Constant *RHS = cast<Constant>(Cmp.getOperand(1));
3604 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
3605
3606 switch (BO->getOpcode()) {
3607 case Instruction::SRem:
3608 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
3609 if (C.isZero() && BO->hasOneUse()) {
3610 const APInt *BOC;
3611 if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
3612 Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
3613 return new ICmpInst(Pred, NewRem,
3614 Constant::getNullValue(BO->getType()));
3615 }
3616 }
3617 break;
3618 case Instruction::Add: {
3619 // (A + C2) == C --> A == (C - C2)
3620 // (A + C2) != C --> A != (C - C2)
3621 // TODO: Remove the one-use limitation? See discussion in D58633.
3622 if (Constant *C2 = dyn_cast<Constant>(BOp1)) {
3623 if (BO->hasOneUse())
3624 return new ICmpInst(Pred, BOp0, ConstantExpr::getSub(RHS, C2));
3625 } else if (C.isZero()) {
3626 // Replace ((add A, B) != 0) with (A != -B) if A or B is
3627 // efficiently invertible, or if the add has just this one use.
3628 if (Value *NegVal = dyn_castNegVal(BOp1))
3629 return new ICmpInst(Pred, BOp0, NegVal);
3630 if (Value *NegVal = dyn_castNegVal(BOp0))
3631 return new ICmpInst(Pred, NegVal, BOp1);
3632 if (BO->hasOneUse()) {
3633 // (add nuw A, B) != 0 -> (or A, B) != 0
3634 if (match(BO, m_NUWAdd(m_Value(), m_Value()))) {
3635 Value *Or = Builder.CreateOr(BOp0, BOp1);
3636 return new ICmpInst(Pred, Or, Constant::getNullValue(BO->getType()));
3637 }
3638 Value *Neg = Builder.CreateNeg(BOp1);
3639 Neg->takeName(BO);
3640 return new ICmpInst(Pred, BOp0, Neg);
3641 }
3642 }
3643 break;
3644 }
3645 case Instruction::Xor:
3646 if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
3647 // For the xor case, we can xor two constants together, eliminating
3648 // the explicit xor.
3649 return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
3650 } else if (C.isZero()) {
3651 // Replace ((xor A, B) != 0) with (A != B)
3652 return new ICmpInst(Pred, BOp0, BOp1);
3653 }
3654 break;
3655 case Instruction::Or: {
3656 const APInt *BOC;
3657 if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
3658 // Comparing if all bits outside of a constant mask are set?
3659 // Replace (X | C) == -1 with (X & ~C) == ~C.
3660 // This removes the -1 constant.
3661 Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
3662 Value *And = Builder.CreateAnd(BOp0, NotBOC);
3663 return new ICmpInst(Pred, And, NotBOC);
3664 }
3665 // (icmp eq (or (select cond, 0, NonZero), Other), 0)
3666 // -> (and cond, (icmp eq Other, 0))
3667 // (icmp ne (or (select cond, NonZero, 0), Other), 0)
3668 // -> (or cond, (icmp ne Other, 0))
3669 Value *Cond, *TV, *FV, *Other, *Sel;
3670 if (C.isZero() &&
3671 match(BO,
3672 m_OneUse(m_c_Or(m_CombineAnd(m_Value(Sel),
3673 m_Select(m_Value(Cond), m_Value(TV),
3674 m_Value(FV))),
3675 m_Value(Other)))) &&
3676 Cond->getType() == Cmp.getType()) {
3677 const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
3678 // Easy case is if eq/ne matches whether 0 is trueval/falseval.
3679 if (Pred == ICmpInst::ICMP_EQ
3680 ? (match(TV, m_Zero()) && isKnownNonZero(FV, Q))
3681 : (match(FV, m_Zero()) && isKnownNonZero(TV, Q))) {
3682 Value *Cmp = Builder.CreateICmp(
3683 Pred, Other, Constant::getNullValue(Other->getType()));
3684 return BinaryOperator::Create(
3685 Pred == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or, Cmp,
3686 Cond);
3687 }
3688 // Harder case is if eq/ne matches whether 0 is falseval/trueval. In this
3689 // case we need to invert the select condition so we need to be careful to
3690 // avoid creating extra instructions.
3691 // (icmp ne (or (select cond, 0, NonZero), Other), 0)
3692 // -> (or (not cond), (icmp ne Other, 0))
3693 // (icmp eq (or (select cond, NonZero, 0), Other), 0)
3694 // -> (and (not cond), (icmp eq Other, 0))
3695 //
3696 // Only do this if the inner select has one use, in which case we are
3697 // replacing `select` with `(not cond)`. Otherwise, we will create more
3698 // uses. NB: Trying to freely invert cond doesn't make sense here, as if
3699 // cond was freely invertable, the select arms would have been inverted.
3700 if (Sel->hasOneUse() &&
3701 (Pred == ICmpInst::ICMP_EQ
3702 ? (match(FV, m_Zero()) && isKnownNonZero(TV, Q))
3703 : (match(TV, m_Zero()) && isKnownNonZero(FV, Q)))) {
3704 Value *NotCond = Builder.CreateNot(Cond);
3705 Value *Cmp = Builder.CreateICmp(
3706 Pred, Other, Constant::getNullValue(Other->getType()));
3707 return BinaryOperator::Create(
3708 Pred == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or, Cmp,
3709 NotCond);
3710 }
3711 }
3712 break;
3713 }
3714 case Instruction::UDiv:
3715 case Instruction::SDiv:
3716 if (BO->isExact()) {
3717 // div exact X, Y eq/ne 0 -> X eq/ne 0
3718 // div exact X, Y eq/ne 1 -> X eq/ne Y
3719 // div exact X, Y eq/ne C ->
3720 // if Y * C never-overflow && OneUse:
3721 // -> Y * C eq/ne X
3722 if (C.isZero())
3723 return new ICmpInst(Pred, BOp0, Constant::getNullValue(BO->getType()));
3724 else if (C.isOne())
3725 return new ICmpInst(Pred, BOp0, BOp1);
3726 else if (BO->hasOneUse()) {
3727 OverflowResult OR = computeOverflow(
3728 Instruction::Mul, BO->getOpcode() == Instruction::SDiv, BOp1,
3729 Cmp.getOperand(1), BO);
3730 if (OR == OverflowResult::NeverOverflows) {
3731 Value *YC =
3732 Builder.CreateMul(BOp1, ConstantInt::get(BO->getType(), C));
3733 return new ICmpInst(Pred, YC, BOp0);
3734 }
3735 }
3736 }
3737 if (BO->getOpcode() == Instruction::UDiv && C.isZero()) {
3738 // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A)
3739 auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3740 return new ICmpInst(NewPred, BOp1, BOp0);
3741 }
3742 break;
3743 default:
3744 break;
3745 }
3746 return nullptr;
3747}
3748
3749 static Instruction *foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs,
3750 const APInt &CRhs,
3751 InstCombiner::BuilderTy &Builder,
3752 const SimplifyQuery &Q) {
3753 assert(CtpopLhs->getIntrinsicID() == Intrinsic::ctpop &&
3754 "Non-ctpop intrin in ctpop fold");
3755 if (!CtpopLhs->hasOneUse())
3756 return nullptr;
3757
3758 // Power of 2 test:
3759 // isPow2OrZero : ctpop(X) u< 2
3760 // isPow2 : ctpop(X) == 1
3761 // NotPow2OrZero: ctpop(X) u> 1
3762 // NotPow2 : ctpop(X) != 1
3763 // If we know that some bit of X is definitely set, these fold to:
3764 // IsPow2 : X & (~Bit) == 0
3765 // NotPow2 : X & (~Bit) != 0
3766 const ICmpInst::Predicate Pred = I.getPredicate();
3767 if (((I.isEquality() || Pred == ICmpInst::ICMP_UGT) && CRhs == 1) ||
3768 (Pred == ICmpInst::ICMP_ULT && CRhs == 2)) {
3769 Value *Op = CtpopLhs->getArgOperand(0);
3770 KnownBits OpKnown = computeKnownBits(Op, Q.DL, Q.AC, Q.CxtI, Q.DT);
3771 // No need to check for count > 1; that should already be constant-folded.
3772 if (OpKnown.countMinPopulation() == 1) {
3773 Value *And = Builder.CreateAnd(
3774 Op, Constant::getIntegerValue(Op->getType(), ~(OpKnown.One)));
3775 return new ICmpInst(
3776 (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_ULT)
3777 ? ICmpInst::ICMP_EQ
3778 : ICmpInst::ICMP_NE,
3779 And, Constant::getNullValue(Op->getType()));
3780 }
3781 }
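// Illustrative instance (editorial sketch, hypothetical values): if known
// bits prove bit 3 of %x is always set (OpKnown.One == 8), then
// icmp eq (ctpop i8 %x), 1 --> icmp eq (and i8 %x, -9), 0.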
3782
3783 return nullptr;
3784}
3785
3786/// Fold an equality icmp with LLVM intrinsic and constant operand.
3787 Instruction *InstCombinerImpl::foldICmpEqIntrinsicWithConstant(
3788 ICmpInst &Cmp, IntrinsicInst *II, const APInt &C) {
3789 Type *Ty = II->getType();
3790 unsigned BitWidth = C.getBitWidth();
3791 const ICmpInst::Predicate Pred = Cmp.getPredicate();
3792
3793 switch (II->getIntrinsicID()) {
3794 case Intrinsic::abs:
3795 // abs(A) == 0 -> A == 0
3796 // abs(A) == INT_MIN -> A == INT_MIN
3797 if (C.isZero() || C.isMinSignedValue())
3798 return new ICmpInst(Pred, II->getArgOperand(0), ConstantInt::get(Ty, C));
3799 break;
3800
3801 case Intrinsic::bswap:
3802 // bswap(A) == C -> A == bswap(C)
3803 return new ICmpInst(Pred, II->getArgOperand(0),
3804 ConstantInt::get(Ty, C.byteSwap()));
3805
3806 case Intrinsic::bitreverse:
3807 // bitreverse(A) == C -> A == bitreverse(C)
3808 return new ICmpInst(Pred, II->getArgOperand(0),
3809 ConstantInt::get(Ty, C.reverseBits()));
3810
3811 case Intrinsic::ctlz:
3812 case Intrinsic::cttz: {
3813 // ctz(A) == bitwidth(A) -> A == 0 and likewise for !=
3814 if (C == BitWidth)
3815 return new ICmpInst(Pred, II->getArgOperand(0),
3816 ConstantInt::getNullValue(Ty));
3817
3818 // ctz(A) == C -> A & Mask1 == Mask2, where Mask2 only has bit C set
3819 // and Mask1 has bits 0..C+1 set. Similar for ctl, but for high bits.
3820 // Limit to one use to ensure we don't increase instruction count.
3821 unsigned Num = C.getLimitedValue(BitWidth);
3822 if (Num != BitWidth && II->hasOneUse()) {
3823 bool IsTrailing = II->getIntrinsicID() == Intrinsic::cttz;
3824 APInt Mask1 = IsTrailing ? APInt::getLowBitsSet(BitWidth, Num + 1)
3825 : APInt::getHighBitsSet(BitWidth, Num + 1);
3826 APInt Mask2 = IsTrailing
3827 ? APInt::getOneBitSet(BitWidth, Num)
3828 : APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3829 return new ICmpInst(Pred, Builder.CreateAnd(II->getArgOperand(0), Mask1),
3830 ConstantInt::get(Ty, Mask2));
3831 }
3832 break;
3833 }
3834
3835 case Intrinsic::ctpop: {
3836 // popcount(A) == 0 -> A == 0 and likewise for !=
3837 // popcount(A) == bitwidth(A) -> A == -1 and likewise for !=
3838 bool IsZero = C.isZero();
3839 if (IsZero || C == BitWidth)
3840 return new ICmpInst(Pred, II->getArgOperand(0),
3841 IsZero ? Constant::getNullValue(Ty)
3842 : Constant::getAllOnesValue(Ty));
3843
3844 break;
3845 }
3846
3847 case Intrinsic::fshl:
3848 case Intrinsic::fshr:
3849 if (II->getArgOperand(0) == II->getArgOperand(1)) {
3850 const APInt *RotAmtC;
3851 // ror(X, RotAmtC) == C --> X == rol(C, RotAmtC)
3852 // rol(X, RotAmtC) == C --> X == ror(C, RotAmtC)
3853 if (match(II->getArgOperand(2), m_APInt(RotAmtC)))
3854 return new ICmpInst(Pred, II->getArgOperand(0),
3855 II->getIntrinsicID() == Intrinsic::fshl
3856 ? ConstantInt::get(Ty, C.rotr(*RotAmtC))
3857 : ConstantInt::get(Ty, C.rotl(*RotAmtC)));
3858 }
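// Illustrative instance (editorial sketch, hypothetical i8 values):
// icmp eq (fshr i8 %x, i8 %x, i8 2), 18 --> icmp eq i8 %x, 72,
// since rotl(18, 2) == 72 (0x12 rotated left by 2 is 0x48).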
3859 break;
3860
3861 case Intrinsic::umax:
3862 case Intrinsic::uadd_sat: {
3863 // uadd.sat(a, b) == 0 -> (a | b) == 0
3864 // umax(a, b) == 0 -> (a | b) == 0
3865 if (C.isZero() && II->hasOneUse()) {
3866 Value *Or = Builder.CreateOr(II->getArgOperand(0), II->getArgOperand(1));
3867 return new ICmpInst(Pred, Or, Constant::getNullValue(Ty));
3868 }
3869 break;
3870 }
3871
3872 case Intrinsic::ssub_sat:
3873 // ssub.sat(a, b) == 0 -> a == b
3874 if (C.isZero())
3875 return new ICmpInst(Pred, II->getArgOperand(0), II->getArgOperand(1));
3876 break;
3877 case Intrinsic::usub_sat: {
3878 // usub.sat(a, b) == 0 -> a <= b
3879 if (C.isZero()) {
3880 ICmpInst::Predicate NewPred =
3881 Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3882 return new ICmpInst(NewPred, II->getArgOperand(0), II->getArgOperand(1));
3883 }
3884 break;
3885 }
3886 default:
3887 break;
3888 }
3889
3890 return nullptr;
3891}
3892
3893/// Fold an icmp where both operands are calls to the same LLVM intrinsic.
3894static Instruction *
3895 foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp,
3896 InstCombiner::BuilderTy &Builder) {
3897 assert(Cmp.isEquality());
3898
3899 ICmpInst::Predicate Pred = Cmp.getPredicate();
3900 Value *Op0 = Cmp.getOperand(0);
3901 Value *Op1 = Cmp.getOperand(1);
3902 const auto *IIOp0 = dyn_cast<IntrinsicInst>(Op0);
3903 const auto *IIOp1 = dyn_cast<IntrinsicInst>(Op1);
3904 if (!IIOp0 || !IIOp1 || IIOp0->getIntrinsicID() != IIOp1->getIntrinsicID())
3905 return nullptr;
3906
3907 switch (IIOp0->getIntrinsicID()) {
3908 case Intrinsic::bswap:
3909 case Intrinsic::bitreverse:
3910 // If both operands are byte-swapped or bit-reversed, just compare the
3911 // original values.
3912 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3913 case Intrinsic::fshl:
3914 case Intrinsic::fshr: {
3915 // If both operands are rotated by same amount, just compare the
3916 // original values.
3917 if (IIOp0->getOperand(0) != IIOp0->getOperand(1))
3918 break;
3919 if (IIOp1->getOperand(0) != IIOp1->getOperand(1))
3920 break;
3921 if (IIOp0->getOperand(2) == IIOp1->getOperand(2))
3922 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3923
3924 // rotate(X, AmtX) == rotate(Y, AmtY)
3925 // -> rotate(X, AmtX - AmtY) == Y
3926 // Do this if either both rotates have one use or if only one has one use
3927 // and AmtX/AmtY are constants.
3928 unsigned OneUses = IIOp0->hasOneUse() + IIOp1->hasOneUse();
3929 if (OneUses == 2 ||
3930 (OneUses == 1 && match(IIOp0->getOperand(2), m_ImmConstant()) &&
3931 match(IIOp1->getOperand(2), m_ImmConstant()))) {
3932 Value *SubAmt =
3933 Builder.CreateSub(IIOp0->getOperand(2), IIOp1->getOperand(2));
3934 Value *CombinedRotate = Builder.CreateIntrinsic(
3935 Op0->getType(), IIOp0->getIntrinsicID(),
3936 {IIOp0->getOperand(0), IIOp0->getOperand(0), SubAmt});
3937 return new ICmpInst(Pred, IIOp1->getOperand(0), CombinedRotate);
3938 }
3939 } break;
3940 default:
3941 break;
3942 }
3943
3944 return nullptr;
3945}
3946
3947/// Try to fold integer comparisons with a constant operand: icmp Pred X, C
3948/// where X is some kind of instruction and C is AllowPoison.
3949/// TODO: Move more folds which allow poison to this function.
3950 Instruction *
3951 InstCombinerImpl::foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp,
3952 const APInt &C) {
3953 const ICmpInst::Predicate Pred = Cmp.getPredicate();
3954 if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0))) {
3955 switch (II->getIntrinsicID()) {
3956 default:
3957 break;
3958 case Intrinsic::fshl:
3959 case Intrinsic::fshr:
3960 if (Cmp.isEquality() && II->getArgOperand(0) == II->getArgOperand(1)) {
3961 // (rot X, ?) == 0/-1 --> X == 0/-1
3962 if (C.isZero() || C.isAllOnes())
3963 return new ICmpInst(Pred, II->getArgOperand(0), Cmp.getOperand(1));
3964 }
3965 break;
3966 }
3967 }
3968
3969 return nullptr;
3970}
3971
3972/// Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C.
3973 Instruction *InstCombinerImpl::foldICmpBinOpWithConstant(ICmpInst &Cmp,
3974 BinaryOperator *BO,
3975 const APInt &C) {
3976 switch (BO->getOpcode()) {
3977 case Instruction::Xor:
3978 if (Instruction *I = foldICmpXorConstant(Cmp, BO, C))
3979 return I;
3980 break;
3981 case Instruction::And:
3982 if (Instruction *I = foldICmpAndConstant(Cmp, BO, C))
3983 return I;
3984 break;
3985 case Instruction::Or:
3986 if (Instruction *I = foldICmpOrConstant(Cmp, BO, C))
3987 return I;
3988 break;
3989 case Instruction::Mul:
3990 if (Instruction *I = foldICmpMulConstant(Cmp, BO, C))
3991 return I;
3992 break;
3993 case Instruction::Shl:
3994 if (Instruction *I = foldICmpShlConstant(Cmp, BO, C))
3995 return I;
3996 break;
3997 case Instruction::LShr:
3998 case Instruction::AShr:
3999 if (Instruction *I = foldICmpShrConstant(Cmp, BO, C))
4000 return I;
4001 break;
4002 case Instruction::SRem:
4003 if (Instruction *I = foldICmpSRemConstant(Cmp, BO, C))
4004 return I;
4005 break;
4006 case Instruction::UDiv:
4007 if (Instruction *I = foldICmpUDivConstant(Cmp, BO, C))
4008 return I;
4009 [[fallthrough]];
4010 case Instruction::SDiv:
4011 if (Instruction *I = foldICmpDivConstant(Cmp, BO, C))
4012 return I;
4013 break;
4014 case Instruction::Sub:
4015 if (Instruction *I = foldICmpSubConstant(Cmp, BO, C))
4016 return I;
4017 break;
4018 case Instruction::Add:
4019 if (Instruction *I = foldICmpAddConstant(Cmp, BO, C))
4020 return I;
4021 break;
4022 default:
4023 break;
4024 }
4025
4026 // TODO: These folds could be refactored to be part of the above calls.
4027 if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, C))
4028 return I;
4029
4030 // Fall back to handling `icmp pred (select A ? C1 : C2) binop (select B ? C3
4031 // : C4), C5` pattern, by computing a truth table of the four constant
4032 // variants.
4033 return foldICmpBinOpWithConstantViaTruthTable(Cmp, BO, C);
4034}
4035
4036static Instruction *
4037 foldICmpUSubSatOrUAddSatWithConstant(ICmpInst::Predicate Pred,
4038 SaturatingInst *II, const APInt &C,
4039 InstCombiner::BuilderTy &Builder) {
4040 // This transform may end up producing more than one instruction for the
4041 // intrinsic, so limit it to one user of the intrinsic.
4042 if (!II->hasOneUse())
4043 return nullptr;
4044
4045 // Let Y = [add/sub]_sat(X, C) pred C2
4046 // SatVal = The saturating value for the operation
4047 // WillWrap = Whether or not the operation will underflow / overflow
4048 // => Y = (WillWrap ? SatVal : (X binop C)) pred C2
4049 // => Y = WillWrap ? (SatVal pred C2) : ((X binop C) pred C2)
4050 //
4051 // When (SatVal pred C2) is true, then
4052 // Y = WillWrap ? true : ((X binop C) pred C2)
4053 // => Y = WillWrap || ((X binop C) pred C2)
4054 // else
4055 // Y = WillWrap ? false : ((X binop C) pred C2)
4056 // => Y = !WillWrap ? ((X binop C) pred C2) : false
4057 // => Y = !WillWrap && ((X binop C) pred C2)
4058 Value *Op0 = II->getOperand(0);
4059 Value *Op1 = II->getOperand(1);
4060
4061 const APInt *COp1;
4062 // This transform only works when the intrinsic has an integral constant or
4063 // splat vector as the second operand.
4064 if (!match(Op1, m_APInt(COp1)))
4065 return nullptr;
4066
4067 APInt SatVal;
4068 switch (II->getIntrinsicID()) {
4069 default:
4071 "This function only works with usub_sat and uadd_sat for now!");
4072 case Intrinsic::uadd_sat:
4073 SatVal = APInt::getAllOnes(C.getBitWidth());
4074 break;
4075 case Intrinsic::usub_sat:
4076 SatVal = APInt::getZero(C.getBitWidth());
4077 break;
4078 }
4079
4080 // Check (SatVal pred C2)
4081 bool SatValCheck = ICmpInst::compare(SatVal, C, Pred);
4082
4083 // !WillWrap.
4084 ConstantRange C1 = ConstantRange::makeExactNoWrapRegion(
4085 II->getBinaryOp(), *COp1, II->getNoWrapKind());
4086
4087 // WillWrap.
4088 if (SatValCheck)
4089 C1 = C1.inverse();
4090
4091 ConstantRange C2 = ConstantRange::makeExactICmpRegion(Pred, C);
4092 if (II->getBinaryOp() == Instruction::Add)
4093 C2 = C2.sub(*COp1);
4094 else
4095 C2 = C2.add(*COp1);
4096
4097 Instruction::BinaryOps CombiningOp =
4098 SatValCheck ? Instruction::BinaryOps::Or : Instruction::BinaryOps::And;
4099
4100 std::optional<ConstantRange> Combination;
4101 if (CombiningOp == Instruction::BinaryOps::Or)
4102 Combination = C1.exactUnionWith(C2);
4103 else /* CombiningOp == Instruction::BinaryOps::And */
4104 Combination = C1.exactIntersectWith(C2);
4105
4106 if (!Combination)
4107 return nullptr;
4108
4109 CmpInst::Predicate EquivPred;
4110 APInt EquivInt;
4111 APInt EquivOffset;
4112
4113 Combination->getEquivalentICmp(EquivPred, EquivInt, EquivOffset);
4114
4115 return new ICmpInst(
4116 EquivPred,
4117 Builder.CreateAdd(Op0, ConstantInt::get(Op1->getType(), EquivOffset)),
4118 ConstantInt::get(Op1->getType(), EquivInt));
4119}
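// Illustrative instance (editorial sketch, hypothetical i8 values):
// icmp ult (uadd.sat i8 %x, 5), 10 --> icmp ult i8 %x, 5: the saturated
// value 255 fails the compare, so only the non-wrapping range survives.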
4120
4121static Instruction *
4122 foldICmpOfCmpIntrinsicWithConstant(ICmpInst::Predicate Pred, CmpIntrinsic *I,
4123 const APInt &C,
4124 InstCombiner::BuilderTy &Builder) {
4125 std::optional<ICmpInst::Predicate> NewPredicate = std::nullopt;
4126 switch (Pred) {
4127 case ICmpInst::ICMP_EQ:
4128 case ICmpInst::ICMP_NE:
4129 if (C.isZero())
4130 NewPredicate = Pred;
4131 else if (C.isOne())
4132 NewPredicate =
4133 Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_ULE;
4134 else if (C.isAllOnes())
4135 NewPredicate =
4136 Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
4137 break;
4138
4139 case ICmpInst::ICMP_SGT:
4140 if (C.isAllOnes())
4141 NewPredicate = ICmpInst::ICMP_UGE;
4142 else if (C.isZero())
4143 NewPredicate = ICmpInst::ICMP_UGT;
4144 break;
4145
4146 case ICmpInst::ICMP_SLT:
4147 if (C.isZero())
4148 NewPredicate = ICmpInst::ICMP_ULT;
4149 else if (C.isOne())
4150 NewPredicate = ICmpInst::ICMP_ULE;
4151 break;
4152
4153 case ICmpInst::ICMP_ULT:
4154 if (C.ugt(1))
4155 NewPredicate = ICmpInst::ICMP_UGE;
4156 break;
4157
4158 case ICmpInst::ICMP_UGT:
4159 if (!C.isZero() && !C.isAllOnes())
4160 NewPredicate = ICmpInst::ICMP_ULT;
4161 break;
4162
4163 default:
4164 break;
4165 }
4166
4167 if (!NewPredicate)
4168 return nullptr;
4169
4170 if (I->getIntrinsicID() == Intrinsic::scmp)
4171 NewPredicate = ICmpInst::getSignedPredicate(*NewPredicate);
4172 Value *LHS = I->getOperand(0);
4173 Value *RHS = I->getOperand(1);
4174 return new ICmpInst(*NewPredicate, LHS, RHS);
4175}
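// Illustrative instance (editorial sketch, hypothetical values):
// icmp slt (scmp i32 %a, %b), 0 --> icmp slt i32 %a, %b.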
4176
4177/// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
4178 Instruction *InstCombinerImpl::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
4179 IntrinsicInst *II,
4180 const APInt &C) {
4181 ICmpInst::Predicate Pred = Cmp.getPredicate();
4182
4183 // Handle folds that apply for any kind of icmp.
4184 switch (II->getIntrinsicID()) {
4185 default:
4186 break;
4187 case Intrinsic::uadd_sat:
4188 case Intrinsic::usub_sat:
4189 if (auto *Folded = foldICmpUSubSatOrUAddSatWithConstant(
4190 Pred, cast<SaturatingInst>(II), C, Builder))
4191 return Folded;
4192 break;
4193 case Intrinsic::ctpop: {
4194 const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
4195 if (Instruction *R = foldCtpopPow2Test(Cmp, II, C, Builder, Q))
4196 return R;
4197 } break;
4198 case Intrinsic::scmp:
4199 case Intrinsic::ucmp:
4200 if (auto *Folded = foldICmpOfCmpIntrinsicWithConstant(Pred, II, C, Builder))
4201 return Folded;
4202 break;
4203 }
4204
4205 if (Cmp.isEquality())
4206 return foldICmpEqIntrinsicWithConstant(Cmp, II, C);
4207
4208 Type *Ty = II->getType();
4209 unsigned BitWidth = C.getBitWidth();
4210 switch (II->getIntrinsicID()) {
4211 case Intrinsic::ctpop: {
4212 // (ctpop X > BitWidth - 1) --> X == -1
4213 Value *X = II->getArgOperand(0);
4214 if (C == BitWidth - 1 && Pred == ICmpInst::ICMP_UGT)
4215 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ, X,
4216 ConstantInt::getAllOnesValue(Ty));
4217 // (ctpop X < BitWidth) --> X != -1
4218 if (C == BitWidth && Pred == ICmpInst::ICMP_ULT)
4219 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE, X,
4220 ConstantInt::getAllOnesValue(Ty));
4221 break;
4222 }
4223 case Intrinsic::ctlz: {
4224 // ctlz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX < 0b00010000
4225 if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
4226 unsigned Num = C.getLimitedValue();
4227 APInt Limit = APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
4228 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_ULT,
4229 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4230 }
4231
4232 // ctlz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX > 0b00011111
4233 if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
4234 unsigned Num = C.getLimitedValue();
4235 APInt Limit = APInt::getLowBitsSet(BitWidth, BitWidth - Num);
4236 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_UGT,
4237 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4238 }
4239 break;
4240 }
4241 case Intrinsic::cttz: {
4242 // Limit to one use to ensure we don't increase instruction count.
4243 if (!II->hasOneUse())
4244 return nullptr;
4245
4246 // cttz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX & 0b00001111 == 0
4247 if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
4248 APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue() + 1);
4249 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ,
4250 Builder.CreateAnd(II->getArgOperand(0), Mask),
4251 ConstantInt::getNullValue(Ty));
4252 }
4253
4254 // cttz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX & 0b00000111 != 0
4255 if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
4256 APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue());
4257 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE,
4258 Builder.CreateAnd(II->getArgOperand(0), Mask),
4259 ConstantInt::getNullValue(Ty));
4260 }
4261 break;
4262 }
4263 case Intrinsic::ssub_sat:
4264 // ssub.sat(a, b) spred 0 -> a spred b
4265 if (ICmpInst::isSigned(Pred)) {
4266 if (C.isZero())
4267 return new ICmpInst(Pred, II->getArgOperand(0), II->getArgOperand(1));
4268 // X s<= 0 is canonicalized to X s< 1
4269 if (Pred == ICmpInst::ICMP_SLT && C.isOne())
4270 return new ICmpInst(ICmpInst::ICMP_SLE, II->getArgOperand(0),
4271 II->getArgOperand(1));
4272 // X s>= 0 is canonicalized to X s> -1
4273 if (Pred == ICmpInst::ICMP_SGT && C.isAllOnes())
4274 return new ICmpInst(ICmpInst::ICMP_SGE, II->getArgOperand(0),
4275 II->getArgOperand(1));
4276 }
4277 break;
4278 default:
4279 break;
4280 }
4281
4282 return nullptr;
4283}
4284
4285/// Handle icmp with constant (but not simple integer constant) RHS.
4286 Instruction *InstCombinerImpl::foldICmpInstWithConstantNotInt(ICmpInst &I) {
4287 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4288 Constant *RHSC = dyn_cast<Constant>(Op1);
4289 Instruction *LHSI = dyn_cast<Instruction>(Op0);
4290 if (!RHSC || !LHSI)
4291 return nullptr;
4292
4293 switch (LHSI->getOpcode()) {
4294 case Instruction::IntToPtr:
4295 // icmp pred inttoptr(X), null -> icmp pred X, 0
4296 if (RHSC->isNullValue() &&
4297 DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
4298 return new ICmpInst(
4299 I.getPredicate(), LHSI->getOperand(0),
4300 Constant::getNullValue(LHSI->getOperand(0)->getType()));
4301 break;
4302
4303 case Instruction::Load:
4304 // Try to optimize things like "A[i] > 4" to index computations.
4305 if (GetElementPtrInst *GEP =
4306 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
4307 if (Instruction *Res =
4308 foldCmpLoadFromIndexedGlobal(cast<LoadInst>(LHSI), GEP, I))
4309 return Res;
4310 break;
4311 }
4312
4313 return nullptr;
4314}
4315
4316 Instruction *InstCombinerImpl::foldSelectICmp(CmpPredicate Pred, SelectInst *SI,
4317 Value *RHS, const ICmpInst &I) {
4318 // Try to fold the comparison into the select arms, which will cause the
4319 // select to be converted into a logical and/or.
4320 auto SimplifyOp = [&](Value *Op, bool SelectCondIsTrue) -> Value * {
4321 if (Value *Res = simplifyICmpInst(Pred, Op, RHS, SQ))
4322 return Res;
4323 if (std::optional<bool> Impl = isImpliedCondition(
4324 SI->getCondition(), Pred, Op, RHS, DL, SelectCondIsTrue))
4325 return ConstantInt::get(I.getType(), *Impl);
4326 return nullptr;
4327 };
4328
4329 ConstantInt *CI = nullptr;
4330 Value *Op1 = SimplifyOp(SI->getOperand(1), true);
4331 if (Op1)
4332 CI = dyn_cast<ConstantInt>(Op1);
4333
4334 Value *Op2 = SimplifyOp(SI->getOperand(2), false);
4335 if (Op2)
4336 CI = dyn_cast<ConstantInt>(Op2);
4337
4338 auto Simplifies = [&](Value *Op, unsigned Idx) {
4339 // A comparison of ucmp/scmp with a constant will fold into an icmp.
4340 const APInt *Dummy;
4341 return Op ||
4342 (isa<CmpIntrinsic>(SI->getOperand(Idx)) &&
4343 SI->getOperand(Idx)->hasOneUse() && match(RHS, m_APInt(Dummy)));
4344 };
4345
4346 // We only want to perform this transformation if it will not lead to
4347 // additional code. This is true if either both sides of the select
4348 // fold to a constant (in which case the icmp is replaced with a select
4349 // which will usually simplify) or this is the only user of the
4350 // select (in which case we are trading a select+icmp for a simpler
4351 // select+icmp) or all uses of the select can be replaced based on
4352 // dominance information ("Global cases").
4353 bool Transform = false;
4354 if (Op1 && Op2)
4355 Transform = true;
4356 else if (Simplifies(Op1, 1) || Simplifies(Op2, 2)) {
4357 // Local case
4358 if (SI->hasOneUse())
4359 Transform = true;
4360 // Global cases
4361 else if (CI && !CI->isZero())
4362 // When Op1 is constant try replacing select with second operand.
4363 // Otherwise Op2 is constant and try replacing select with first
4364 // operand.
4365 Transform = replacedSelectWithOperand(SI, &I, Op1 ? 2 : 1);
4366 }
4367 if (Transform) {
4368 if (!Op1)
4369 Op1 = Builder.CreateICmp(Pred, SI->getOperand(1), RHS, I.getName());
4370 if (!Op2)
4371 Op2 = Builder.CreateICmp(Pred, SI->getOperand(2), RHS, I.getName());
4372 return SelectInst::Create(SI->getOperand(0), Op1, Op2);
4373 }
4374
4375 return nullptr;
4376}
4377
4378// Returns whether V is a Mask ((X + 1) & X == 0) or ~Mask (-Pow2OrZero)
4379static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q,
4380 unsigned Depth = 0) {
4381 if (Not ? match(V, m_NegatedPower2OrZero()) : match(V, m_LowBitMaskOrZero()))
4382 return true;
4383 if (V->getType()->getScalarSizeInBits() == 1)
4384 return true;
4385 if (Depth++ >= MaxAnalysisRecursionDepth)
4386 return false;
4387 Value *X;
4388 const Instruction *I = dyn_cast<Instruction>(V);
4389 if (!I)
4390 return false;
4391 switch (I->getOpcode()) {
4392 case Instruction::ZExt:
4393 // ZExt(Mask) is a Mask.
4394 return !Not && isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4395 case Instruction::SExt:
4396 // SExt(Mask) is a Mask.
4397 // SExt(~Mask) is a ~Mask.
4398 return isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4399 case Instruction::And:
4400 case Instruction::Or:
4401 // Mask0 | Mask1 is a Mask.
4402 // Mask0 & Mask1 is a Mask.
4403 // ~Mask0 | ~Mask1 is a ~Mask.
4404 // ~Mask0 & ~Mask1 is a ~Mask.
4405 return isMaskOrZero(I->getOperand(1), Not, Q, Depth) &&
4406 isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4407 case Instruction::Xor:
4408 if (match(V, m_Not(m_Value(X))))
4409 return isMaskOrZero(X, !Not, Q, Depth);
4410
4411 // (X ^ -X) is a ~Mask
4412 if (Not)
4413 return match(V, m_c_Xor(m_Value(X), m_Neg(m_Deferred(X))));
4414 // (X ^ (X - 1)) is a Mask
4415 else
4416 return match(V, m_c_Xor(m_Value(X), m_Add(m_Deferred(X), m_AllOnes())));
4417 case Instruction::Select:
4418 // c ? Mask0 : Mask1 is a Mask.
4419 return isMaskOrZero(I->getOperand(1), Not, Q, Depth) &&
4420 isMaskOrZero(I->getOperand(2), Not, Q, Depth);
4421 case Instruction::Shl:
4422 // (~Mask) << X is a ~Mask.
4423 return Not && isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4424 case Instruction::LShr:
4425 // Mask >> X is a Mask.
4426 return !Not && isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4427 case Instruction::AShr:
4428 // Mask s>> X is a Mask.
4429 // ~Mask s>> X is a ~Mask.
4430 return isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4431 case Instruction::Add:
4432 // Pow2 - 1 is a Mask.
4433 if (!Not && match(I->getOperand(1), m_AllOnes()))
4434 return isKnownToBeAPowerOfTwo(I->getOperand(0), Q.DL, /*OrZero*/ true,
4435 Q.AC, Q.CxtI, Q.DT, Depth);
4436 break;
4437 case Instruction::Sub:
4438 // -Pow2 is a ~Mask.
4439 if (Not && match(I->getOperand(0), m_Zero()))
4440 return isKnownToBeAPowerOfTwo(I->getOperand(1), Q.DL, /*OrZero*/ true,
4441 Q.AC, Q.CxtI, Q.DT, Depth);
4442 break;
4443 case Instruction::Call: {
4444 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
4445 switch (II->getIntrinsicID()) {
4446 // min/max(Mask0, Mask1) is a Mask.
4447 // min/max(~Mask0, ~Mask1) is a ~Mask.
4448 case Intrinsic::umax:
4449 case Intrinsic::smax:
4450 case Intrinsic::umin:
4451 case Intrinsic::smin:
4452 return isMaskOrZero(II->getArgOperand(1), Not, Q, Depth) &&
4453 isMaskOrZero(II->getArgOperand(0), Not, Q, Depth);
4454
4455 // In the context of masks, bitreverse(Mask) == ~Mask
4456 case Intrinsic::bitreverse:
4457 return isMaskOrZero(II->getArgOperand(0), !Not, Q, Depth);
4458 default:
4459 break;
4460 }
4461 }
4462 break;
4463 }
4464 default:
4465 break;
4466 }
4467 return false;
4468}
4469
4470/// Some comparisons can be simplified.
4471/// In this case, we are looking for comparisons that look like
4472/// a check for a lossy truncation.
4473/// Folds:
4474/// icmp SrcPred (x & Mask), x to icmp DstPred x, Mask
4475/// icmp SrcPred (x & ~Mask), ~Mask to icmp DstPred x, ~Mask
4476/// icmp eq/ne (x & ~Mask), 0 to icmp DstPred x, Mask
4477/// icmp eq/ne (~x | Mask), -1 to icmp DstPred x, Mask
4478/// Where Mask is some pattern that produces all-ones in low bits:
4479/// (-1 >> y)
4480/// ((-1 << y) >> y) <- non-canonical, has extra uses
4481/// ~(-1 << y)
4482/// ((1 << y) + (-1)) <- non-canonical, has extra uses
4483/// The Mask can be a constant, too.
4484/// For some predicates, the operands are commutative.
4485/// For others, x can only be on a specific side.
4486 static Value *foldICmpWithLowBitMaskedVal(CmpPredicate Pred, Value *Op0,
4487 Value *Op1, const SimplifyQuery &Q,
4488 InstCombiner &IC) {
4489
4490 ICmpInst::Predicate DstPred;
4491 switch (Pred) {
4492 case ICmpInst::ICMP_EQ:
4493 // x & Mask == x
4494 // x & ~Mask == 0
4495 // ~x | Mask == -1
4496 // -> x u<= Mask
4497 // x & ~Mask == ~Mask
4498 // -> ~Mask u<= x
4499 DstPred = ICmpInst::ICMP_ULE;
4500 break;
4501 case ICmpInst::ICMP_NE:
4502 // x & Mask != x
4503 // x & ~Mask != 0
4504 // ~x | Mask != -1
4505 // -> x u> Mask
4506 // x & ~Mask != ~Mask
4507 // -> ~Mask u> x
4508 DstPred = ICmpInst::ICMP_UGT;
4509 break;
4510 case ICmpInst::ICMP_ULT:
4511 // x & Mask u< x
4512 // -> x u> Mask
4513 // x & ~Mask u< ~Mask
4514 // -> ~Mask u> x
4515 DstPred = ICmpInst::ICMP_UGT;
4516 break;
4517 case ICmpInst::ICMP_UGE:
4518 // x & Mask u>= x
4519 // -> x u<= Mask
4520 // x & ~Mask u>= ~Mask
4521 // -> ~Mask u<= x
4522 DstPred = ICmpInst::ICMP_ULE;
4523 break;
4524 case ICmpInst::ICMP_SLT:
4525 // x & Mask s< x [iff Mask s>= 0]
4526 // -> x s> Mask
4527 // x & ~Mask s< ~Mask [iff ~Mask != 0]
4528 // -> ~Mask s> x
4529 DstPred = ICmpInst::ICMP_SGT;
4530 break;
4531 case ICmpInst::ICMP_SGE:
4532 // x & Mask s>= x [iff Mask s>= 0]
4533 // -> x s<= Mask
4534 // x & ~Mask s>= ~Mask [iff ~Mask != 0]
4535 // -> ~Mask s<= x
4536 DstPred = ICmpInst::ICMP_SLE;
4537 break;
4538 default:
4539 // We don't support sgt, sle.
4540 // ule/ugt are simplified to true/false respectively.
4541 return nullptr;
4542 }
4543
4544 Value *X, *M;
4545 // Put search code in lambda for early positive returns.
4546 auto IsLowBitMask = [&]() {
4547 if (match(Op0, m_c_And(m_Specific(Op1), m_Value(M)))) {
4548 X = Op1;
4549 // Look for: x & Mask pred x
4550 if (isMaskOrZero(M, /*Not=*/false, Q)) {
4551 return !ICmpInst::isSigned(Pred) ||
4552 (match(M, m_NonNegative()) || isKnownNonNegative(M, Q));
4553 }
4554
4555 // Look for: x & ~Mask pred ~Mask
4556 if (isMaskOrZero(X, /*Not=*/true, Q)) {
4557 return !ICmpInst::isSigned(Pred) || isKnownNonZero(X, Q);
4558 }
4559 return false;
4560 }
4561 if (ICmpInst::isEquality(Pred) && match(Op1, m_AllOnes()) &&
4562 match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(M))))) {
4563
4564 auto Check = [&]() {
4565 // Look for: ~x | Mask == -1
4566 if (isMaskOrZero(M, /*Not=*/false, Q)) {
4567 if (Value *NotX =
4568 IC.getFreelyInverted(X, X->hasOneUse(), &IC.Builder)) {
4569 X = NotX;
4570 return true;
4571 }
4572 }
4573 return false;
4574 };
4575 if (Check())
4576 return true;
4577 std::swap(X, M);
4578 return Check();
4579 }
4580 if (ICmpInst::isEquality(Pred) && match(Op1, m_Zero()) &&
4581 match(Op0, m_OneUse(m_And(m_Value(X), m_Value(M))))) {
4582 auto Check = [&]() {
4583 // Look for: x & ~Mask == 0
4584 if (isMaskOrZero(M, /*Not=*/true, Q)) {
4585 if (Value *NotM =
4586 IC.getFreelyInverted(M, M->hasOneUse(), &IC.Builder)) {
4587 M = NotM;
4588 return true;
4589 }
4590 }
4591 return false;
4592 };
4593 if (Check())
4594 return true;
4595 std::swap(X, M);
4596 return Check();
4597 }
4598 return false;
4599 };
4600
4601 if (!IsLowBitMask())
4602 return nullptr;
4603
4604 return IC.Builder.CreateICmp(DstPred, X, M);
4605}
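// Illustrative instance (editorial sketch, hypothetical i8 values):
// icmp eq (and i8 %x, 7), %x --> icmp ule i8 %x, 7, with 7 as the
// low-bit Mask.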
4606
4607/// Some comparisons can be simplified.
4608/// In this case, we are looking for comparisons that look like
4609/// a check for a lossy signed truncation.
4610/// Folds: (MaskedBits is a constant.)
4611/// ((%x << MaskedBits) a>> MaskedBits) SrcPred %x
4612/// Into:
4613/// (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits)
4614/// Where KeptBits = bitwidth(%x) - MaskedBits
4615static Value *
4616 foldICmpWithTruncSignExtendedVal(ICmpInst &I,
4617 InstCombiner::BuilderTy &Builder) {
4618 CmpPredicate SrcPred;
4619 Value *X;
4620 const APInt *C0, *C1; // FIXME: non-splats, potentially with undef.
4621 // We are ok with 'shl' having multiple uses, but 'ashr' must be one-use.
4622 if (!match(&I, m_c_ICmp(SrcPred,
4623 m_OneUse(m_AShr(m_Shl(m_Value(X), m_APInt(C0)),
4624 m_APInt(C1))),
4625 m_Deferred(X))))
4626 return nullptr;
4627
4628 // Potential handling of non-splats: for each element:
4629 // * if both are undef, replace with constant 0.
4630 // Because (1<<0) is OK and is 1, and ((1<<0)>>1) is also OK and is 0.
4631 // * if both are not undef, and are different, bailout.
4632 // * else, only one is undef, then pick the non-undef one.
4633
4634 // The shift amount must be equal.
4635 if (*C0 != *C1)
4636 return nullptr;
4637 const APInt &MaskedBits = *C0;
4638 assert(MaskedBits != 0 && "shift by zero should be folded away already.");
4639
4640 ICmpInst::Predicate DstPred;
4641 switch (SrcPred) {
4642 case ICmpInst::ICMP_EQ:
4643 // ((%x << MaskedBits) a>> MaskedBits) == %x
4644 // =>
4645 // (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)
4646 DstPred = ICmpInst::ICMP_ULT;
4647 break;
4648 case ICmpInst::ICMP_NE:
4649 // ((%x << MaskedBits) a>> MaskedBits) != %x
4650 // =>
4651 // (add %x, (1 << (KeptBits-1))) u>= (1 << KeptBits)
4652 DstPred = ICmpInst::ICMP_UGE;
4653 break;
4654 // FIXME: are more folds possible?
4655 default:
4656 return nullptr;
4657 }
4658
4659 auto *XType = X->getType();
4660 const unsigned XBitWidth = XType->getScalarSizeInBits();
4661 const APInt BitWidth = APInt(XBitWidth, XBitWidth);
4662 assert(BitWidth.ugt(MaskedBits) && "shifts should leave some bits untouched");
4663
4664 // KeptBits = bitwidth(%x) - MaskedBits
4665 const APInt KeptBits = BitWidth - MaskedBits;
4666 assert(KeptBits.ugt(0) && KeptBits.ult(BitWidth) && "unreachable");
4667 // ICmpCst = (1 << KeptBits)
4668 const APInt ICmpCst = APInt(XBitWidth, 1).shl(KeptBits);
4669 assert(ICmpCst.isPowerOf2());
4670 // AddCst = (1 << (KeptBits-1))
4671 const APInt AddCst = ICmpCst.lshr(1);
4672 assert(AddCst.ult(ICmpCst) && AddCst.isPowerOf2());
4673
4674 // T0 = add %x, AddCst
4675 Value *T0 = Builder.CreateAdd(X, ConstantInt::get(XType, AddCst));
4676 // T1 = T0 DstPred ICmpCst
4677 Value *T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst));
4678
4679 return T1;
4680}
4681
4682// Given pattern:
4683// icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
4684// we should move shifts to the same hand of 'and', i.e. rewrite as
4685// icmp eq/ne (and (x shift (Q+K)), y), 0 iff (Q+K) u< bitwidth(x)
4686// We are only interested in opposite logical shifts here.
4687// One of the shifts can be truncated.
4688// If we can, we want to end up creating 'lshr' shift.
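// For example (illustrative), for i32 values:
//   icmp eq (and (shl %x, %q), (lshr %y, %k)), 0
//     -> icmp eq (and (shl %x, (%q + %k)), %y), 0 iff (%q + %k) u< 32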
4689static Value *
4690foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
4691 InstCombiner::BuilderTy &Builder) {
4692 if (!I.isEquality() || !match(I.getOperand(1), m_Zero()) ||
4693 !I.getOperand(0)->hasOneUse())
4694 return nullptr;
4695
4696 auto m_AnyLogicalShift = m_LogicalShift(m_Value(), m_Value());
4697
4698 // Look for an 'and' of two logical shifts, one of which may be truncated.
4699 // We use m_TruncOrSelf() on the RHS to correctly handle the commutative case.
4700 Instruction *XShift, *MaybeTruncation, *YShift;
4701 if (!match(
4702 I.getOperand(0),
4703 m_c_And(m_CombineAnd(m_AnyLogicalShift, m_Instruction(XShift)),
4704 m_CombineAnd(m_TruncOrSelf(m_CombineAnd(
4705 m_AnyLogicalShift, m_Instruction(YShift))),
4706 m_Instruction(MaybeTruncation)))))
4707 return nullptr;
4708
4709 // We potentially looked past 'trunc', but only when matching YShift,
4710 // therefore YShift must have the widest type.
4711 Instruction *WidestShift = YShift;
4712 // Therefore XShift must have the shallowest type.
4713 // Or they both have identical types if there was no truncation.
4714 Instruction *NarrowestShift = XShift;
4715
4716 Type *WidestTy = WidestShift->getType();
4717 Type *NarrowestTy = NarrowestShift->getType();
4718 assert(NarrowestTy == I.getOperand(0)->getType() &&
4719 "We did not look past any shifts while matching XShift though.");
4720 bool HadTrunc = WidestTy != I.getOperand(0)->getType();
4721
4722 // If YShift is a 'lshr', swap the shifts around.
4723 if (match(YShift, m_LShr(m_Value(), m_Value())))
4724 std::swap(XShift, YShift);
4725
4726 // The shifts must be in opposite directions.
4727 auto XShiftOpcode = XShift->getOpcode();
4728 if (XShiftOpcode == YShift->getOpcode())
4729 return nullptr; // Do not care about same-direction shifts here.
4730
4731 Value *X, *XShAmt, *Y, *YShAmt;
4732 match(XShift, m_BinOp(m_Value(X), m_ZExtOrSelf(m_Value(XShAmt))));
4733 match(YShift, m_BinOp(m_Value(Y), m_ZExtOrSelf(m_Value(YShAmt))));
4734
4735 // If one of the values being shifted is a constant, then we will end with
4736 // and+icmp, and [zext+]shift instrs will be constant-folded. If they are not,
4737 // however, we will need to ensure that we won't increase instruction count.
4738 if (!isa<Constant>(X) && !isa<Constant>(Y)) {
4739 // At least one of the hands of the 'and' should be one-use shift.
4740 if (!match(I.getOperand(0),
4741 m_c_And(m_OneUse(m_AnyLogicalShift), m_Value())))
4742 return nullptr;
4743 if (HadTrunc) {
4744 // Due to the 'trunc', we will need to widen X. For that either the old
4745 // 'trunc' or the shift amt in the non-truncated shift should be one-use.
4746 if (!MaybeTruncation->hasOneUse() &&
4747 !NarrowestShift->getOperand(1)->hasOneUse())
4748 return nullptr;
4749 }
4750 }
4751
4752 // We have two shift amounts from two different shifts. The types of those
4753 // shift amounts may not match. If that's the case let's bailout now.
4754 if (XShAmt->getType() != YShAmt->getType())
4755 return nullptr;
4756
4757 // As input, we have the following pattern:
4758 // icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
4759 // We want to rewrite that as:
4760 // icmp eq/ne (and (x shift (Q+K)), y), 0 iff (Q+K) u< bitwidth(x)
4761 // While we know that originally (Q+K) would not overflow
4762 // (because 2 * (N-1) u<= iN -1), we have looked past extensions of
4763 // shift amounts, so it may now overflow in a smaller bitwidth.
4764 // To ensure that does not happen, we need to ensure that the total maximal
4765 // shift amount is still representable in that smaller bit width.
4766 unsigned MaximalPossibleTotalShiftAmount =
4767 (WidestTy->getScalarSizeInBits() - 1) +
4768 (NarrowestTy->getScalarSizeInBits() - 1);
4769 APInt MaximalRepresentableShiftAmount =
4770 APInt::getAllOnes(XShAmt->getType()->getScalarSizeInBits());
4771 if (MaximalRepresentableShiftAmount.ult(MaximalPossibleTotalShiftAmount))
4772 return nullptr;
4773
4774 // Can we fold (XShAmt+YShAmt) ?
4775 auto *NewShAmt = dyn_cast_or_null<Constant>(
4776 simplifyAddInst(XShAmt, YShAmt, /*isNSW=*/false,
4777 /*isNUW=*/false, SQ.getWithInstruction(&I)));
4778 if (!NewShAmt)
4779 return nullptr;
4780 if (NewShAmt->getType() != WidestTy) {
4781 NewShAmt =
4782 ConstantFoldCastOperand(Instruction::ZExt, NewShAmt, WidestTy, SQ.DL);
4783 if (!NewShAmt)
4784 return nullptr;
4785 }
4786 unsigned WidestBitWidth = WidestTy->getScalarSizeInBits();
4787
4788 // Is the new shift amount smaller than the bit width?
4789 // FIXME: could also rely on ConstantRange.
4790 if (!match(NewShAmt,
4791 m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
4792 APInt(WidestBitWidth, WidestBitWidth))))
4793 return nullptr;
4794
4795 // An extra legality check is needed if we had trunc-of-lshr.
4796 if (HadTrunc && match(WidestShift, m_LShr(m_Value(), m_Value()))) {
4797 auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
4798 WidestShift]() {
4799 // It isn't obvious whether it's worth it to analyze non-constants here.
4800 // Also, let's basically give up on non-splat cases, pessimizing vectors.
4801 // If *any* of these preconditions matches we can perform the fold.
4802 Constant *NewShAmtSplat = NewShAmt->getType()->isVectorTy()
4803 ? NewShAmt->getSplatValue()
4804 : NewShAmt;
4805 // If it's edge-case shift (by 0 or by WidestBitWidth-1) we can fold.
4806 if (NewShAmtSplat &&
4807 (NewShAmtSplat->isNullValue() ||
4808 NewShAmtSplat->getUniqueInteger() == WidestBitWidth - 1))
4809 return true;
4810 // We consider *min* leading zeros so a single outlier
4811 // blocks the transform as opposed to allowing it.
4812 if (auto *C = dyn_cast<Constant>(NarrowestShift->getOperand(0))) {
4813 KnownBits Known = computeKnownBits(C, SQ.DL);
4814 unsigned MinLeadZero = Known.countMinLeadingZeros();
4815 // If the value being shifted has at most the lowest bit set, we can fold.
4816 unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
4817 if (MaxActiveBits <= 1)
4818 return true;
4819 // Precondition: NewShAmt u<= countLeadingZeros(C)
4820 if (NewShAmtSplat && NewShAmtSplat->getUniqueInteger().ule(MinLeadZero))
4821 return true;
4822 }
4823 if (auto *C = dyn_cast<Constant>(WidestShift->getOperand(0))) {
4824 KnownBits Known = computeKnownBits(C, SQ.DL);
4825 unsigned MinLeadZero = Known.countMinLeadingZeros();
4827 // If the value being shifted has at most the lowest bit set, we can fold.
4827 unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
4828 if (MaxActiveBits <= 1)
4829 return true;
4830 // Precondition: ((WidestBitWidth-1)-NewShAmt) u<= countLeadingZeros(C)
4831 if (NewShAmtSplat) {
4832 APInt AdjNewShAmt =
4833 (WidestBitWidth - 1) - NewShAmtSplat->getUniqueInteger();
4834 if (AdjNewShAmt.ule(MinLeadZero))
4835 return true;
4836 }
4837 }
4838 return false; // Can't tell if it's ok.
4839 };
4840 if (!CanFold())
4841 return nullptr;
4842 }
4843
4844 // All good, we can do this fold.
4845 X = Builder.CreateZExt(X, WidestTy);
4846 Y = Builder.CreateZExt(Y, WidestTy);
4847 // The shift is the same that was for X.
4848 Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
4849 ? Builder.CreateLShr(X, NewShAmt)
4850 : Builder.CreateShl(X, NewShAmt);
4851 Value *T1 = Builder.CreateAnd(T0, Y);
4852 return Builder.CreateICmp(I.getPredicate(), T1,
4853 Constant::getNullValue(WidestTy));
4854}
4855
4856/// Fold
4857/// (-1 u/ x) u< y
4858/// ((x * y) ?/ x) != y
4859/// to
4860/// @llvm.?mul.with.overflow(x, y) plus extraction of overflow bit
4861/// Note that the comparison is commutative, while inverted (u>=, ==) predicate
4862/// will mean that we are looking for the opposite answer.
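/// Example (illustrative):
///   %d = udiv i32 -1, %x
///   %r = icmp ult i32 %d, %y
/// becomes
///   %m = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
///   %r = extractvalue { i32, i1 } %m, 1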
4863Value *InstCombinerImpl::foldMultiplicationOverflowCheck(ICmpInst &I) {
4864 CmpPredicate Pred;
4865 Value *X, *Y;
4866 Instruction *Mul;
4867 Instruction *Div;
4868 bool NeedNegation;
4869 // Look for: (-1 u/ x) u</u>= y
4870 if (!I.isEquality() &&
4871 match(&I, m_c_ICmp(Pred,
4872 m_CombineAnd(m_OneUse(m_UDiv(m_AllOnes(), m_Value(X))),
4873 m_Instruction(Div)),
4874 m_Value(Y)))) {
4875 Mul = nullptr;
4876
4877 // Are we checking that overflow does not happen, or does happen?
4878 switch (Pred) {
4879 case ICmpInst::Predicate::ICMP_ULT:
4880 NeedNegation = false;
4881 break; // OK
4882 case ICmpInst::Predicate::ICMP_UGE:
4883 NeedNegation = true;
4884 break; // OK
4885 default:
4886 return nullptr; // Wrong predicate.
4887 }
4888 } else // Look for: ((x * y) / x) !=/== y
4889 if (I.isEquality() &&
4890 match(&I, m_c_ICmp(Pred, m_Value(Y),
4891 m_CombineAnd(
4892 m_OneUse(m_IDiv(m_CombineAnd(m_c_Mul(m_Deferred(Y),
4893 m_Value(X)),
4894 m_Instruction(Mul)),
4895 m_Deferred(X))),
4896 m_Instruction(Div))))) {
4897 NeedNegation = Pred == ICmpInst::Predicate::ICMP_EQ;
4898 } else
4899 return nullptr;
4900
4901 BuilderTy::InsertPointGuard Guard(Builder);
4902 // If the pattern included (x * y), we'll want to insert new instructions
4903 // right before that original multiplication so that we can replace it.
4904 bool MulHadOtherUses = Mul && !Mul->hasOneUse();
4905 if (MulHadOtherUses)
4906 Builder.SetInsertPoint(Mul);
4907
4908 CallInst *Call = Builder.CreateIntrinsic(
4909 Div->getOpcode() == Instruction::UDiv ? Intrinsic::umul_with_overflow
4910 : Intrinsic::smul_with_overflow,
4911 X->getType(), {X, Y}, /*FMFSource=*/nullptr, "mul");
4912
4913 // If the multiplication was used elsewhere, to ensure that we don't leave
4914 // "duplicate" instructions, replace uses of that original multiplication
4915 // with the multiplication result from the with.overflow intrinsic.
4916 if (MulHadOtherUses)
4917 replaceInstUsesWith(*Mul, Builder.CreateExtractValue(Call, 0, "mul.val"));
4918
4919 Value *Res = Builder.CreateExtractValue(Call, 1, "mul.ov");
4920 if (NeedNegation) // This technically increases instruction count.
4921 Res = Builder.CreateNot(Res, "mul.not.ov");
4922
4923 // If we replaced the mul, erase it. Do this after all uses of Builder,
4924 // as the mul is used as insertion point.
4925 if (MulHadOtherUses)
4926 eraseInstFromFunction(*Mul);
4927
4928 return Res;
4929}
4930
4931static Instruction *foldICmpXNegX(ICmpInst &I,
4932 InstCombiner::BuilderTy &Builder) {
4933 CmpPredicate Pred;
4934 Value *X;
4935 if (match(&I, m_c_ICmp(Pred, m_NSWNeg(m_Value(X)), m_Deferred(X)))) {
4936
4937 if (ICmpInst::isSigned(Pred))
4938 Pred = ICmpInst::getSwappedPredicate(Pred);
4939 else if (ICmpInst::isUnsigned(Pred))
4940 Pred = ICmpInst::getSignedPredicate(Pred);
4941 // else for equality-comparisons just keep the predicate.
4942
4943 return ICmpInst::Create(Instruction::ICmp, Pred, X,
4944 Constant::getNullValue(X->getType()), I.getName());
4945 }
4946
4947 // A value is not equal to its negation unless that value is 0 or
4948 // MinSignedValue, ie: a != -a --> (a & MaxSignedVal) != 0
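 // (For i8, say, a == -a only for 0 and -128, and those are exactly the
 // values with all of bits 0..6 clear, hence the mask with 127; illustrative.)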
4949 if (match(&I, m_c_ICmp(Pred, m_OneUse(m_Neg(m_Value(X))), m_Deferred(X))) &&
4950 ICmpInst::isEquality(Pred)) {
4951 Type *Ty = X->getType();
4952 uint32_t BitWidth = Ty->getScalarSizeInBits();
4953 Constant *MaxSignedVal =
4954 ConstantInt::get(Ty, APInt::getSignedMaxValue(BitWidth));
4955 Value *And = Builder.CreateAnd(X, MaxSignedVal);
4956 Constant *Zero = Constant::getNullValue(Ty);
4957 return CmpInst::Create(Instruction::ICmp, Pred, And, Zero);
4958 }
4959
4960 return nullptr;
4961}
4962
4963static Instruction *foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q,
4964 InstCombinerImpl &IC) {
4965 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *A;
4966 // Normalize the 'and' operand to be operand 0.
4967 CmpInst::Predicate Pred = I.getPredicate();
4968 if (match(Op1, m_c_And(m_Specific(Op0), m_Value()))) {
4969 std::swap(Op0, Op1);
4970 Pred = ICmpInst::getSwappedPredicate(Pred);
4971 }
4972
4973 if (!match(Op0, m_c_And(m_Specific(Op1), m_Value(A))))
4974 return nullptr;
4975
4976 // icmp (X & Y) u< X --> (X & Y) != X
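 // (X & Y is always u<= X, so the strict u< is equivalent to !=.)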
4977 if (Pred == ICmpInst::ICMP_ULT)
4978 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4979
4980 // (icmp (X & Y) u>= X --> (X & Y) == X
4981 if (Pred == ICmpInst::ICMP_UGE)
4982 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
4983
4984 if (ICmpInst::isEquality(Pred) && Op0->hasOneUse()) {
4985 // icmp (X & Y) eq/ne Y --> (X | ~Y) eq/ne -1 if Y is freely invertible and
4986 // Y is non-constant. If Y is constant the `X & C == C` form is preferable
4987 // so don't do this fold.
4988 if (!match(Op1, m_ImmConstant()))
4989 if (auto *NotOp1 =
4990 IC.getFreelyInverted(Op1, !Op1->hasNUsesOrMore(3), &IC.Builder))
4991 return new ICmpInst(Pred, IC.Builder.CreateOr(A, NotOp1),
4992 Constant::getAllOnesValue(Op1->getType()));
4993 // icmp (X & Y) eq/ne Y --> (~X & Y) eq/ne 0 if X is freely invertible.
4994 if (auto *NotA = IC.getFreelyInverted(A, A->hasOneUse(), &IC.Builder))
4995 return new ICmpInst(Pred, IC.Builder.CreateAnd(Op1, NotA),
4996 Constant::getNullValue(Op1->getType()));
4997 }
4998
4999 if (!ICmpInst::isSigned(Pred))
5000 return nullptr;
5001
5002 KnownBits KnownY = IC.computeKnownBits(A, &I);
5003 // (X & NegY) spred X --> (X & NegY) upred X
5004 if (KnownY.isNegative())
5005 return new ICmpInst(ICmpInst::getUnsignedPredicate(Pred), Op0, Op1);
5006
5007 if (Pred != ICmpInst::ICMP_SLE && Pred != ICmpInst::ICMP_SGT)
5008 return nullptr;
5009
5010 if (KnownY.isNonNegative())
5011 // (X & PosY) s<= X --> X s>= 0
5012 // (X & PosY) s> X --> X s< 0
5013 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
5014 Constant::getNullValue(Op1->getType()));
5015
5016 if (isKnownNegative(Op1, IC.getSimplifyQuery().getWithInstruction(&I)))
5017 // (NegX & Y) s<= NegX --> Y s< 0
5018 // (NegX & Y) s> NegX --> Y s>= 0
5019 return new ICmpInst(ICmpInst::getStrictPredicate(Pred), A,
5020 Constant::getNullValue(A->getType()));
5021
5022 return nullptr;
5023}
5024
5025static Instruction *foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q,
5026 InstCombinerImpl &IC) {
5027 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *A;
5028
5029 // Normalize the 'or' operand to be operand 0.
5030 CmpInst::Predicate Pred = I.getPredicate();
5031 if (match(Op1, m_c_Or(m_Specific(Op0), m_Value(A)))) {
5032 std::swap(Op0, Op1);
5033 Pred = ICmpInst::getSwappedPredicate(Pred);
5034 } else if (!match(Op0, m_c_Or(m_Specific(Op1), m_Value(A)))) {
5035 return nullptr;
5036 }
5037
5038 // icmp (X | Y) u<= X --> (X | Y) == X
5039 if (Pred == ICmpInst::ICMP_ULE)
5040 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5041
5042 // icmp (X | Y) u> X --> (X | Y) != X
5043 if (Pred == ICmpInst::ICMP_UGT)
5044 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5045
5046 if (ICmpInst::isEquality(Pred) && Op0->hasOneUse()) {
5047 // icmp (X | Y) eq/ne Y --> (X & ~Y) eq/ne 0 if Y is freely invertible
5048 if (Value *NotOp1 = IC.getFreelyInverted(
5049 Op1, !isa<Constant>(Op1) && !Op1->hasNUsesOrMore(3), &IC.Builder))
5050 return new ICmpInst(Pred, IC.Builder.CreateAnd(A, NotOp1),
5051 Constant::getNullValue(Op1->getType()));
5052 // icmp (X | Y) eq/ne Y --> (~X | Y) eq/ne -1 if X is freely invertible.
5053 if (Value *NotA = IC.getFreelyInverted(A, A->hasOneUse(), &IC.Builder))
5054 return new ICmpInst(Pred, IC.Builder.CreateOr(Op1, NotA),
5055 Constant::getAllOnesValue(Op1->getType()));
5056 }
5057 return nullptr;
5058}
5059
5060static Instruction *foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q,
5061 InstCombinerImpl &IC) {
5062 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *A;
5063 // Normalize the 'xor' operand to be operand 0.
5064 CmpInst::Predicate Pred = I.getPredicate();
5065 if (match(Op1, m_c_Xor(m_Specific(Op0), m_Value()))) {
5066 std::swap(Op0, Op1);
5067 Pred = ICmpInst::getSwappedPredicate(Pred);
5068 }
5069 if (!match(Op0, m_c_Xor(m_Specific(Op1), m_Value(A))))
5070 return nullptr;
5071
5072 // icmp (X ^ Y_NonZero) u>= X --> icmp (X ^ Y_NonZero) u> X
5073 // icmp (X ^ Y_NonZero) u<= X --> icmp (X ^ Y_NonZero) u< X
5074 // icmp (X ^ Y_NonZero) s>= X --> icmp (X ^ Y_NonZero) s> X
5075 // icmp (X ^ Y_NonZero) s<= X --> icmp (X ^ Y_NonZero) s< X
5076 CmpInst::Predicate PredOut = CmpInst::getStrictPredicate(Pred);
5077 if (PredOut != Pred && isKnownNonZero(A, Q))
5078 return new ICmpInst(PredOut, Op0, Op1);
5079
5080 // These transforms work when A is negative.
5081 // X s< X^A, X s<= X^A, X u> X^A, X u>= X^A --> X s< 0
5082 // X s> X^A, X s>= X^A, X u< X^A, X u<= X^A --> X s>= 0
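 // (When A is negative its sign bit is set, so X and X^A always have
 // opposite sign bits; each comparison thus reduces to a sign test of X.)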
5083 if (match(A, m_Negative())) {
5084 CmpInst::Predicate NewPred;
5085 switch (ICmpInst::getStrictPredicate(Pred)) {
5086 default:
5087 return nullptr;
5088 case ICmpInst::ICMP_SLT:
5089 case ICmpInst::ICMP_UGT:
5090 NewPred = ICmpInst::ICMP_SLT;
5091 break;
5092 case ICmpInst::ICMP_SGT:
5093 case ICmpInst::ICMP_ULT:
5094 NewPred = ICmpInst::ICMP_SGE;
5095 break;
5096 }
5097 Constant *Const = Constant::getNullValue(Op0->getType());
5098 return new ICmpInst(NewPred, Op0, Const);
5099 }
5100
5101 return nullptr;
5102}
5103
5104/// Return true if X is a multiple of C.
5105/// TODO: Handle non-power-of-2 factors.
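/// For a power-of-2 C this is a mask test: e.g. (illustrative) X is a
/// multiple of 8 iff the low 3 bits of X are known to be zero.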
5106static bool isMultipleOf(Value *X, const APInt &C, const SimplifyQuery &Q) {
5107 if (C.isOne())
5108 return true;
5109
5110 if (!C.isPowerOf2())
5111 return false;
5112
5113 return MaskedValueIsZero(X, C - 1, Q);
5114}
5115
5116/// Try to fold icmp (binop), X or icmp X, (binop).
5117/// TODO: A large part of this logic is duplicated in InstSimplify's
5118/// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
5119/// duplication.
5120Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
5121 const SimplifyQuery &SQ) {
5122 const SimplifyQuery Q = SQ.getWithInstruction(&I);
5123 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5124
5125 // Special logic for binary operators.
5126 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
5127 BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
5128 if (!BO0 && !BO1)
5129 return nullptr;
5130
5131 if (Instruction *NewICmp = foldICmpXNegX(I, Builder))
5132 return NewICmp;
5133
5134 const CmpInst::Predicate Pred = I.getPredicate();
5135 Value *X;
5136
5137 // Convert add-with-unsigned-overflow comparisons into a 'not' with compare.
5138 // (Op1 + X) u</u>= Op1 --> ~Op1 u</u>= X
5139 if (match(Op0, m_OneUse(m_c_Add(m_Specific(Op1), m_Value(X)))) &&
5140 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
5141 return new ICmpInst(Pred, Builder.CreateNot(Op1), X);
5142 // Op0 u>/u<= (Op0 + X) --> X u>/u<= ~Op0
5143 if (match(Op1, m_OneUse(m_c_Add(m_Specific(Op0), m_Value(X)))) &&
5144 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
5145 return new ICmpInst(Pred, X, Builder.CreateNot(Op0));
5146
5147 {
5148 // (Op1 + X) + C u</u>= Op1 --> ~C - X u</u>= Op1
5149 Constant *C;
5150 if (match(Op0, m_OneUse(m_Add(m_c_Add(m_Specific(Op1), m_Value(X)),
5151 m_ImmConstant(C)))) &&
5152 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
5153 Constant *C2 = ConstantExpr::getNot(C);
5154 return new ICmpInst(Pred, Builder.CreateSub(C2, X), Op1);
5155 }
5156 // Op0 u>/u<= (Op0 + X) + C --> Op0 u>/u<= ~C - X
5157 if (match(Op1, m_OneUse(m_Add(m_c_Add(m_Specific(Op0), m_Value(X)),
5158 m_ImmConstant(C)))) &&
5159 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE)) {
5160 Constant *C2 = ConstantExpr::getNot(C);
5161 return new ICmpInst(Pred, Op0, Builder.CreateSub(C2, X));
5162 }
5163 }
5164
5165 // (icmp eq/ne (and X, -P2), INT_MIN)
5166 // -> (icmp slt/sge X, INT_MIN + P2)
5167 if (ICmpInst::isEquality(Pred) && BO0 &&
5168 match(I.getOperand(1), m_SignMask()) &&
5169 match(BO0, m_And(m_Value(), m_NegatedPower2OrZero()))) {
5170 // Will Constant fold.
5171 Value *NewC = Builder.CreateSub(I.getOperand(1), BO0->getOperand(1));
5172 return new ICmpInst(Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SLT
5173 : ICmpInst::ICMP_SGE,
5174 BO0->getOperand(0), NewC);
5175 }
5176
5177 {
5178 // Similar to above: an unsigned overflow comparison may use offset + mask:
5179 // ((Op1 + C) & C) u< Op1 --> Op1 != 0
5180 // ((Op1 + C) & C) u>= Op1 --> Op1 == 0
5181 // Op0 u> ((Op0 + C) & C) --> Op0 != 0
5182 // Op0 u<= ((Op0 + C) & C) --> Op0 == 0
5183 BinaryOperator *BO;
5184 const APInt *C;
5185 if ((Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE) &&
5186 match(Op0, m_And(m_BinOp(BO), m_LowBitMask(C))) &&
5187 match(BO, m_Add(m_Specific(Op1), m_SpecificIntAllowPoison(*C)))) {
5188 CmpInst::Predicate NewPred =
5189 Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
5190 Constant *Zero = ConstantInt::getNullValue(Op1->getType());
5191 return new ICmpInst(NewPred, Op1, Zero);
5192 }
5193
5194 if ((Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE) &&
5195 match(Op1, m_And(m_BinOp(BO), m_LowBitMask(C))) &&
5196 match(BO, m_Add(m_Specific(Op0), m_SpecificIntAllowPoison(*C)))) {
5197 CmpInst::Predicate NewPred =
5198 Pred == ICmpInst::ICMP_UGT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
5199 Constant *Zero = ConstantInt::getNullValue(Op1->getType());
5200 return new ICmpInst(NewPred, Op0, Zero);
5201 }
5202 }
5203
5204 bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
5205 bool Op0HasNUW = false, Op1HasNUW = false;
5206 bool Op0HasNSW = false, Op1HasNSW = false;
5207 // Analyze the case when either Op0 or Op1 is an add instruction.
5208 // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
5209 auto hasNoWrapProblem = [](const BinaryOperator &BO, CmpInst::Predicate Pred,
5210 bool &HasNSW, bool &HasNUW) -> bool {
5211 if (isa<OverflowingBinaryOperator>(BO)) {
5212 HasNUW = BO.hasNoUnsignedWrap();
5213 HasNSW = BO.hasNoSignedWrap();
5214 return ICmpInst::isEquality(Pred) ||
5215 (CmpInst::isUnsigned(Pred) && HasNUW) ||
5216 (CmpInst::isSigned(Pred) && HasNSW);
5217 } else if (BO.getOpcode() == Instruction::Or) {
5218 HasNUW = true;
5219 HasNSW = true;
5220 return true;
5221 } else {
5222 return false;
5223 }
5224 };
5225 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
5226
5227 if (BO0) {
5228 match(BO0, m_AddLike(m_Value(A), m_Value(B)));
5229 NoOp0WrapProblem = hasNoWrapProblem(*BO0, Pred, Op0HasNSW, Op0HasNUW);
5230 }
5231 if (BO1) {
5232 match(BO1, m_AddLike(m_Value(C), m_Value(D)));
5233 NoOp1WrapProblem = hasNoWrapProblem(*BO1, Pred, Op1HasNSW, Op1HasNUW);
5234 }
5235
5236 // icmp (A+B), A -> icmp B, 0 for equalities or if there is no overflow.
5237 // icmp (A+B), B -> icmp A, 0 for equalities or if there is no overflow.
5238 if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
5239 return new ICmpInst(Pred, A == Op1 ? B : A,
5240 Constant::getNullValue(Op1->getType()));
5241
5242 // icmp C, (C+D) -> icmp 0, D for equalities or if there is no overflow.
5243 // icmp D, (C+D) -> icmp 0, C for equalities or if there is no overflow.
5244 if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
5245 return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
5246 C == Op0 ? D : C);
5247
5248 // icmp (A+B), (A+D) -> icmp B, D for equalities or if there is no overflow.
5249 if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
5250 NoOp1WrapProblem) {
5251 // Determine Y and Z in the form icmp (X+Y), (X+Z).
5252 Value *Y, *Z;
5253 if (A == C) {
5254 // C + B == C + D -> B == D
5255 Y = B;
5256 Z = D;
5257 } else if (A == D) {
5258 // D + B == C + D -> B == C
5259 Y = B;
5260 Z = C;
5261 } else if (B == C) {
5262 // A + C == C + D -> A == D
5263 Y = A;
5264 Z = D;
5265 } else {
5266 assert(B == D);
5267 // A + D == C + D -> A == C
5268 Y = A;
5269 Z = C;
5270 }
5271 return new ICmpInst(Pred, Y, Z);
5272 }
5273
5274 if (ICmpInst::isRelational(Pred)) {
5275 // Return true if both X and Y are divisible by Z/-Z.
5276 // TODO: Generalize to check if (X - Y) is divisible by Z/-Z.
5277 auto ShareCommonDivisor = [&Q](Value *X, Value *Y, Value *Z,
5278 bool IsNegative) -> bool {
5279 const APInt *OffsetC;
5280 if (!match(Z, m_APInt(OffsetC)))
5281 return false;
5282
5283 // Fast path for Z == 1/-1.
5284 if (IsNegative ? OffsetC->isAllOnes() : OffsetC->isOne())
5285 return true;
5286
5287 APInt C = *OffsetC;
5288 if (IsNegative)
5289 C.negate();
5290 // Note: -INT_MIN is also negative.
5291 if (!C.isStrictlyPositive())
5292 return false;
5293
5294 return isMultipleOf(X, C, Q) && isMultipleOf(Y, C, Q);
5295 };
5296
5297 // TODO: The subtraction-related identities shown below also hold, but
5298 // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
5299 // wouldn't happen even if they were implemented.
5300 //
5301 // icmp ult (A - 1), Op1 -> icmp ule A, Op1
5302 // icmp uge (A - 1), Op1 -> icmp ugt A, Op1
5303 // icmp ugt Op0, (C - 1) -> icmp uge Op0, C
5304 // icmp ule Op0, (C - 1) -> icmp ult Op0, C
5305
5306 // icmp slt (A + -1), Op1 -> icmp sle A, Op1
5307 // icmp sge (A + -1), Op1 -> icmp sgt A, Op1
5308 // icmp sle (A + 1), Op1 -> icmp slt A, Op1
5309 // icmp sgt (A + 1), Op1 -> icmp sge A, Op1
5310 // icmp ule (A + 1), Op0 -> icmp ult A, Op1
5311 // icmp ugt (A + 1), Op0 -> icmp uge A, Op1
5312 if (A && NoOp0WrapProblem &&
5313 ShareCommonDivisor(A, Op1, B,
5314 ICmpInst::isLT(Pred) || ICmpInst::isGE(Pred)))
5315 return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), A,
5316 Op1);
5317
5318 // icmp sgt Op0, (C + -1) -> icmp sge Op0, C
5319 // icmp sle Op0, (C + -1) -> icmp slt Op0, C
5320 // icmp sge Op0, (C + 1) -> icmp sgt Op0, C
5321 // icmp slt Op0, (C + 1) -> icmp sle Op0, C
5322 // icmp uge Op0, (C + 1) -> icmp ugt Op0, C
5323 // icmp ult Op0, (C + 1) -> icmp ule Op0, C
5324 if (C && NoOp1WrapProblem &&
5325 ShareCommonDivisor(Op0, C, D,
5326 ICmpInst::isGT(Pred) || ICmpInst::isLE(Pred)))
5327 return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), Op0,
5328 C);
5329 }
5330
5331 // if C1 has greater magnitude than C2:
5332 // icmp (A + C1), (C + C2) -> icmp (A + C3), C
5333 // s.t. C3 = C1 - C2
5334 //
5335 // if C2 has greater magnitude than C1:
5336 // icmp (A + C1), (C + C2) -> icmp A, (C + C3)
5337 // s.t. C3 = C2 - C1
5338 if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
5339 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned()) {
5340 const APInt *AP1, *AP2;
5341 // TODO: Support non-uniform vectors.
5342 // TODO: Allow poison passthrough if B or D's element is poison.
5343 if (match(B, m_APIntAllowPoison(AP1)) &&
5344 match(D, m_APIntAllowPoison(AP2)) &&
5345 AP1->isNegative() == AP2->isNegative()) {
5346 APInt AP1Abs = AP1->abs();
5347 APInt AP2Abs = AP2->abs();
5348 if (AP1Abs.uge(AP2Abs)) {
5349 APInt Diff = *AP1 - *AP2;
5350 Constant *C3 = Constant::getIntegerValue(BO0->getType(), Diff);
5351 Value *NewAdd = Builder.CreateAdd(
5352 A, C3, "", Op0HasNUW && Diff.ule(*AP1), Op0HasNSW);
5353 return new ICmpInst(Pred, NewAdd, C);
5354 } else {
5355 APInt Diff = *AP2 - *AP1;
5356 Constant *C3 = Constant::getIntegerValue(BO0->getType(), Diff);
5357 Value *NewAdd = Builder.CreateAdd(
5358 C, C3, "", Op1HasNUW && Diff.ule(*AP2), Op1HasNSW);
5359 return new ICmpInst(Pred, A, NewAdd);
5360 }
5361 }
5362 Constant *Cst1, *Cst2;
5363 if (match(B, m_ImmConstant(Cst1)) && match(D, m_ImmConstant(Cst2)) &&
5364 ICmpInst::isEquality(Pred)) {
5365 Constant *Diff = ConstantExpr::getSub(Cst2, Cst1);
5366 Value *NewAdd = Builder.CreateAdd(C, Diff);
5367 return new ICmpInst(Pred, A, NewAdd);
5368 }
5369 }
5370
5371 // Analyze the case when either Op0 or Op1 is a sub instruction.
5372 // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
5373 A = nullptr;
5374 B = nullptr;
5375 C = nullptr;
5376 D = nullptr;
5377 if (BO0 && BO0->getOpcode() == Instruction::Sub) {
5378 A = BO0->getOperand(0);
5379 B = BO0->getOperand(1);
5380 }
5381 if (BO1 && BO1->getOpcode() == Instruction::Sub) {
5382 C = BO1->getOperand(0);
5383 D = BO1->getOperand(1);
5384 }
5385
5386 // icmp (A-B), A -> icmp 0, B for equalities or if there is no overflow.
5387 if (A == Op1 && NoOp0WrapProblem)
5388 return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
5389 // icmp C, (C-D) -> icmp D, 0 for equalities or if there is no overflow.
5390 if (C == Op0 && NoOp1WrapProblem)
5391 return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
5392
5393 // Convert sub-with-unsigned-overflow comparisons into a comparison of args.
5394 // (A - B) u>/u<= A --> B u>/u<= A
5395 if (A == Op1 && (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
5396 return new ICmpInst(Pred, B, A);
5397 // C u</u>= (C - D) --> C u</u>= D
5398 if (C == Op0 && (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
5399 return new ICmpInst(Pred, C, D);
5400 // (A - B) u>=/u< A --> B u>/u<= A iff B != 0
5401 if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
5402 isKnownNonZero(B, Q))
5403 return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A);
5404 // C u<=/u> (C - D) --> C u</u>= D iff D != 0
5405 if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
5406 isKnownNonZero(D, Q))
5407 return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D);
5408
5409 // icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow.
5410 if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem)
5411 return new ICmpInst(Pred, A, C);
5412
5413 // icmp (A-B), (A-D) -> icmp D, B for equalities or if there is no overflow.
5414 if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem)
5415 return new ICmpInst(Pred, D, B);
5416
5417 // icmp (0-X) < cst --> x > -cst
5418 if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
5419 Value *X;
5420 if (match(BO0, m_Neg(m_Value(X))))
5421 if (Constant *RHSC = dyn_cast<Constant>(Op1))
5422 if (RHSC->isNotMinSignedValue())
5423 return new ICmpInst(I.getSwappedPredicate(), X,
5424 ConstantExpr::getNeg(RHSC));
5425 }
5426
5427 if (Instruction *R = foldICmpXorXX(I, Q, *this))
5428 return R;
5429 if (Instruction *R = foldICmpOrXX(I, Q, *this))
5430 return R;
5431
5432 {
5433 // Try to remove shared multiplier from comparison:
5434 // X * Z pred Y * Z
5435 Value *X, *Y, *Z;
5436 if ((match(Op0, m_Mul(m_Value(X), m_Value(Z))) &&
5437 match(Op1, m_c_Mul(m_Specific(Z), m_Value(Y)))) ||
5438 (match(Op0, m_Mul(m_Value(Z), m_Value(X))) &&
5439 match(Op1, m_c_Mul(m_Specific(Z), m_Value(Y))))) {
5440 if (ICmpInst::isSigned(Pred)) {
5441 if (Op0HasNSW && Op1HasNSW) {
5442 KnownBits ZKnown = computeKnownBits(Z, &I);
5443 if (ZKnown.isStrictlyPositive())
5444 return new ICmpInst(Pred, X, Y);
5445 if (ZKnown.isNegative())
5446 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), X, Y);
5447 Value *LessThan = simplifyICmpInst(ICmpInst::ICMP_SLT, X, Y,
5448 SQ.getWithInstruction(&I));
5449 if (LessThan && match(LessThan, m_One()))
5450 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Z,
5451 Constant::getNullValue(Z->getType()));
5452 Value *GreaterThan = simplifyICmpInst(ICmpInst::ICMP_SGT, X, Y,
5453 SQ.getWithInstruction(&I));
5454 if (GreaterThan && match(GreaterThan, m_One()))
5455 return new ICmpInst(Pred, Z, Constant::getNullValue(Z->getType()));
5456 }
5457 } else {
5458 bool NonZero;
5459 if (ICmpInst::isEquality(Pred)) {
5460 // If X != Y, fold (X *nw Z) eq/ne (Y *nw Z) -> Z eq/ne 0
5461 if (((Op0HasNSW && Op1HasNSW) || (Op0HasNUW && Op1HasNUW)) &&
5462 isKnownNonEqual(X, Y, SQ))
5463 return new ICmpInst(Pred, Z, Constant::getNullValue(Z->getType()));
5464
5465 KnownBits ZKnown = computeKnownBits(Z, &I);
5466 // if Z % 2 != 0
5467 // X * Z eq/ne Y * Z -> X eq/ne Y
5468 if (ZKnown.countMaxTrailingZeros() == 0)
5469 return new ICmpInst(Pred, X, Y);
5470 NonZero = !ZKnown.One.isZero() || isKnownNonZero(Z, Q);
5471 // if Z != 0 and nsw(X * Z) and nsw(Y * Z)
5472 // X * Z eq/ne Y * Z -> X eq/ne Y
5473 if (NonZero && BO0 && BO1 && Op0HasNSW && Op1HasNSW)
5474 return new ICmpInst(Pred, X, Y);
5475 } else
5476 NonZero = isKnownNonZero(Z, Q);
5477
5478 // If Z != 0 and nuw(X * Z) and nuw(Y * Z)
5479 // X * Z u{lt/le/gt/ge}/eq/ne Y * Z -> X u{lt/le/gt/ge}/eq/ne Y
5480 if (NonZero && BO0 && BO1 && Op0HasNUW && Op1HasNUW)
5481 return new ICmpInst(Pred, X, Y);
5482 }
5483 }
5484 }
5485
5486 BinaryOperator *SRem = nullptr;
5487 // icmp (srem X, Y), Y
5488 if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1))
5489 SRem = BO0;
5490 // icmp Y, (srem X, Y)
5491 else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
5492 Op0 == BO1->getOperand(1))
5493 SRem = BO1;
5494 if (SRem) {
5495 // We don't check hasOneUse to avoid increasing register pressure because
5496 // the value we use is the same value this instruction was already using.
5497 switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
5498 default:
5499 break;
5500 case ICmpInst::ICMP_EQ:
5501 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5502 case ICmpInst::ICMP_NE:
5503 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5504 case ICmpInst::ICMP_SGT:
5505 case ICmpInst::ICMP_SGE:
5506 return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
5507 Constant::getAllOnesValue(SRem->getType()));
5508 case ICmpInst::ICMP_SLT:
5509 case ICmpInst::ICMP_SLE:
5510 return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
5511 Constant::getNullValue(SRem->getType()));
5512 }
5513 }
5514
5515 if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() &&
5516 (BO0->hasOneUse() || BO1->hasOneUse()) &&
5517 BO0->getOperand(1) == BO1->getOperand(1)) {
5518 switch (BO0->getOpcode()) {
5519 default:
5520 break;
5521 case Instruction::Add:
5522 case Instruction::Sub:
5523 case Instruction::Xor: {
5524 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
5525 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5526
5527 const APInt *C;
5528 if (match(BO0->getOperand(1), m_APInt(C))) {
5529 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
5530 if (C->isSignMask()) {
5531 ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
5532 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
5533 }
5534
5535 // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
5536 if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) {
5537 ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
5538 NewPred = I.getSwappedPredicate(NewPred);
5539 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
5540 }
5541 }
5542 break;
5543 }
5544 case Instruction::Mul: {
5545 if (!I.isEquality())
5546 break;
5547
5548 const APInt *C;
5549 if (match(BO0->getOperand(1), m_APInt(C)) && !C->isZero() &&
5550 !C->isOne()) {
5551 // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask)
5552 // Mask = -1 >> count-trailing-zeros(C).
5553 if (unsigned TZs = C->countr_zero()) {
5554 Constant *Mask = ConstantInt::get(
5555 BO0->getType(),
5556 APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
5557 Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask);
5558 Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask);
5559 return new ICmpInst(Pred, And1, And2);
5560 }
5561 }
5562 break;
5563 }
5564 case Instruction::UDiv:
5565 case Instruction::LShr:
5566 if (I.isSigned() || !BO0->isExact() || !BO1->isExact())
5567 break;
5568 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5569
5570 case Instruction::SDiv:
5571 if (!(I.isEquality() || match(BO0->getOperand(1), m_NonNegative())) ||
5572 !BO0->isExact() || !BO1->isExact())
5573 break;
5574 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5575
5576 case Instruction::AShr:
5577 if (!BO0->isExact() || !BO1->isExact())
5578 break;
5579 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5580
5581 case Instruction::Shl: {
5582 bool NUW = Op0HasNUW && Op1HasNUW;
5583 bool NSW = Op0HasNSW && Op1HasNSW;
5584 if (!NUW && !NSW)
5585 break;
5586 if (!NSW && I.isSigned())
5587 break;
5588 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5589 }
5590 }
5591 }
5592
5593 if (BO0) {
5594 // Transform A & (L - 1) `ult` L --> L != 0
5595 auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes());
5596 auto BitwiseAnd = m_c_And(m_Value(), LSubOne);
5597
5598 if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) {
5599 auto *Zero = Constant::getNullValue(BO0->getType());
5600 return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero);
5601 }
5602 }
5603
5604 // For unsigned predicates / eq / ne:
5605 // icmp pred (x << 1), x --> icmp getSignedPredicate(pred) x, 0
5606 // icmp pred x, (x << 1) --> icmp getSignedPredicate(pred) 0, x
5607 if (!ICmpInst::isSigned(Pred)) {
5608 if (match(Op0, m_Shl(m_Specific(Op1), m_One())))
5609 return new ICmpInst(ICmpInst::getSignedPredicate(Pred), Op1,
5610 Constant::getNullValue(Op1->getType()));
5611 else if (match(Op1, m_Shl(m_Specific(Op0), m_One())))
5612 return new ICmpInst(ICmpInst::getSignedPredicate(Pred),
5613 Constant::getNullValue(Op0->getType()), Op0);
5614 }
5615
5616 if (Value *V = foldMultiplicationOverflowCheck(I))
5617 return replaceInstUsesWith(I, V);
5618
5619 if (Instruction *R = foldICmpAndXX(I, Q, *this))
5620 return R;
5621
5622 if (Value *V = foldICmpWithTruncSignExtendedVal(I, Builder))
5623 return replaceInstUsesWith(I, V);
5624
5625 if (Value *V = foldShiftIntoShiftInAnotherHandOfAndInICmp(I, SQ, Builder))
5626 return replaceInstUsesWith(I, V);
5627
5628 return nullptr;
5629}
5630
5631/// Fold icmp Pred min|max(X, Y), Z.
5632Instruction *InstCombinerImpl::foldICmpWithMinMax(Instruction &I,
5633 MinMaxIntrinsic *MinMax,
5634 Value *Z, CmpPredicate Pred) {
5635 Value *X = MinMax->getLHS();
5636 Value *Y = MinMax->getRHS();
5637 if (ICmpInst::isSigned(Pred) && !MinMax->isSigned())
5638 return nullptr;
5639 if (ICmpInst::isUnsigned(Pred) && MinMax->isSigned()) {
5640 // Revert the transform signed pred -> unsigned pred
5641 // TODO: We can flip the signedness of predicate if both operands of icmp
5642 // are negative.
5643 if (isKnownNonNegative(Z, SQ.getWithInstruction(&I)) &&
5644 isKnownNonNegative(MinMax, SQ.getWithInstruction(&I))) {
5645 Pred = ICmpInst::getSignedPredicate(Pred);
5646 } else
5647 return nullptr;
5648 }
5649 SimplifyQuery Q = SQ.getWithInstruction(&I);
5650 auto IsCondKnownTrue = [](Value *Val) -> std::optional<bool> {
5651 if (!Val)
5652 return std::nullopt;
5653 if (match(Val, m_One()))
5654 return true;
5655 if (match(Val, m_Zero()))
5656 return false;
5657 return std::nullopt;
5658 };
5659 // Remove samesign here since it is illegal to keep it when we speculatively
5660 // execute comparisons. For example, `icmp samesign ult umax(X, -46), -32`
5661 // cannot be decomposed into `(icmp samesign ult X, -46) or (icmp samesign ult
5662 // -46, -32)`. `X` is allowed to be non-negative here.
5663 Pred = Pred.dropSameSign();
5664 auto CmpXZ = IsCondKnownTrue(simplifyICmpInst(Pred, X, Z, Q));
5665 auto CmpYZ = IsCondKnownTrue(simplifyICmpInst(Pred, Y, Z, Q));
5666 if (!CmpXZ.has_value() && !CmpYZ.has_value())
5667 return nullptr;
5668 if (!CmpXZ.has_value()) {
5669 std::swap(X, Y);
5670 std::swap(CmpXZ, CmpYZ);
5671 }
5672
5673 auto FoldIntoCmpYZ = [&]() -> Instruction * {
5674 if (CmpYZ.has_value())
5675 return replaceInstUsesWith(I, ConstantInt::getBool(I.getType(), *CmpYZ));
5676 return ICmpInst::Create(Instruction::ICmp, Pred, Y, Z);
5677 };
5678
5679 switch (Pred) {
5680 case ICmpInst::ICMP_EQ:
5681 case ICmpInst::ICMP_NE: {
5682 // If X == Z:
5683 // Expr Result
5684 // min(X, Y) == Z X <= Y
5685 // max(X, Y) == Z X >= Y
5686 // min(X, Y) != Z X > Y
5687 // max(X, Y) != Z X < Y
5688 if ((Pred == ICmpInst::ICMP_EQ) == *CmpXZ) {
5689 ICmpInst::Predicate NewPred =
5690 ICmpInst::getNonStrictPredicate(MinMax->getPredicate());
5691 if (Pred == ICmpInst::ICMP_NE)
5692 NewPred = ICmpInst::getInversePredicate(NewPred);
5693 return ICmpInst::Create(Instruction::ICmp, NewPred, X, Y);
5694 }
5695 // Otherwise (X != Z):
5696 ICmpInst::Predicate NewPred = MinMax->getPredicate();
5697 auto MinMaxCmpXZ = IsCondKnownTrue(simplifyICmpInst(NewPred, X, Z, Q));
5698 if (!MinMaxCmpXZ.has_value()) {
5699 std::swap(X, Y);
5700 std::swap(CmpXZ, CmpYZ);
5701 // Re-check pre-condition X != Z
5702 if (!CmpXZ.has_value() || (Pred == ICmpInst::ICMP_EQ) == *CmpXZ)
5703 break;
5704 MinMaxCmpXZ = IsCondKnownTrue(simplifyICmpInst(NewPred, X, Z, Q));
5705 }
5706 if (!MinMaxCmpXZ.has_value())
5707 break;
5708 if (*MinMaxCmpXZ) {
5709 // Expr Fact Result
5710 // min(X, Y) == Z X < Z false
5711 // max(X, Y) == Z X > Z false
5712 // min(X, Y) != Z X < Z true
5713 // max(X, Y) != Z X > Z true
5714 return replaceInstUsesWith(
5715 I, ConstantInt::getBool(I.getType(), Pred == ICmpInst::ICMP_NE));
5716 } else {
5717 // Expr Fact Result
5718 // min(X, Y) == Z X > Z Y == Z
5719 // max(X, Y) == Z X < Z Y == Z
5720 // min(X, Y) != Z X > Z Y != Z
5721 // max(X, Y) != Z X < Z Y != Z
5722 return FoldIntoCmpYZ();
5723 }
5724 break;
5725 }
5726 case ICmpInst::ICMP_SLT:
5727 case ICmpInst::ICMP_ULT:
5728 case ICmpInst::ICMP_SLE:
5729 case ICmpInst::ICMP_ULE:
5730 case ICmpInst::ICMP_SGT:
5731 case ICmpInst::ICMP_UGT:
5732 case ICmpInst::ICMP_SGE:
5733 case ICmpInst::ICMP_UGE: {
5734 bool IsSame = MinMax->getPredicate() == ICmpInst::getStrictPredicate(Pred);
5735 if (*CmpXZ) {
5736 if (IsSame) {
5737 // Expr Fact Result
5738 // min(X, Y) < Z X < Z true
5739 // min(X, Y) <= Z X <= Z true
5740 // max(X, Y) > Z X > Z true
5741 // max(X, Y) >= Z X >= Z true
5742 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5743 } else {
5744 // Expr Fact Result
5745 // max(X, Y) < Z X < Z Y < Z
5746 // max(X, Y) <= Z X <= Z Y <= Z
5747 // min(X, Y) > Z X > Z Y > Z
5748 // min(X, Y) >= Z X >= Z Y >= Z
5749 return FoldIntoCmpYZ();
5750 }
5751 } else {
5752 if (IsSame) {
5753 // Expr Fact Result
5754 // min(X, Y) < Z X >= Z Y < Z
5755 // min(X, Y) <= Z X > Z Y <= Z
5756 // max(X, Y) > Z X <= Z Y > Z
5757 // max(X, Y) >= Z X < Z Y >= Z
5758 return FoldIntoCmpYZ();
5759 } else {
5760 // Expr Fact Result
5761 // max(X, Y) < Z X >= Z false
5762 // max(X, Y) <= Z X > Z false
5763 // min(X, Y) > Z X <= Z false
5764 // min(X, Y) >= Z X < Z false
5765 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5766 }
5767 }
5768 break;
5769 }
5770 default:
5771 break;
5772 }
5773
5774 return nullptr;
5775}
5776
5777/// Match and fold patterns like:
5778/// icmp eq/ne X, min(max(X, Lo), Hi)
5779/// which represents a range check and can be represented as a ConstantRange.
5780///
5781/// For icmp eq, build ConstantRange [Lo, Hi + 1) and convert to:
5782/// (X - Lo) u< (Hi + 1 - Lo)
5783/// For icmp ne, build ConstantRange [Hi + 1, Lo) and convert to:
5784/// (X - (Hi + 1)) u< (Lo - (Hi + 1))
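/// Example (illustrative): for i32 %x with Lo == 10 and Hi == 20,
///   icmp eq %x, smin(smax(%x, 10), 20) --> (add %x, -10) u< 11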
5785Instruction *InstCombinerImpl::foldICmpWithClamp(ICmpInst &I, Value *X,
5786 MinMaxIntrinsic *Min) {
5787 if (!I.isEquality() || !Min->hasOneUse() || !Min->isMin())
5788 return nullptr;
5789
5790 const APInt *Lo = nullptr, *Hi = nullptr;
5791 if (Min->isSigned()) {
5792 if (!match(Min->getLHS(), m_OneUse(m_SMax(m_Specific(X), m_APInt(Lo)))) ||
5793 !match(Min->getRHS(), m_APInt(Hi)) || !Lo->slt(*Hi))
5794 return nullptr;
5795 } else {
5796 if (!match(Min->getLHS(), m_OneUse(m_UMax(m_Specific(X), m_APInt(Lo)))) ||
5797 !match(Min->getRHS(), m_APInt(Hi)) || !Lo->ult(*Hi))
5798 return nullptr;
5799 }
5800
5801 ConstantRange CR = ConstantRange::getNonEmpty(*Lo, *Hi + 1);
5802 ICmpInst::Predicate Pred;
5803 APInt C, Offset;
5804 if (I.getPredicate() == ICmpInst::ICMP_EQ)
5805 CR.getEquivalentICmp(Pred, C, Offset);
5806 else
5807 CR.inverse().getEquivalentICmp(Pred, C, Offset);
5808
5809 if (!Offset.isZero())
5810 X = Builder.CreateAdd(X, ConstantInt::get(X->getType(), Offset));
5811
5812 return replaceInstUsesWith(
5813 I, Builder.CreateICmp(Pred, X, ConstantInt::get(X->getType(), C)));
5814}
5815
5816// Canonicalize checking for a power-of-2-or-zero value:
5817static Instruction *foldICmpPow2Test(ICmpInst &I,
5818 InstCombiner::BuilderTy &Builder) {
5819 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5820 const CmpInst::Predicate Pred = I.getPredicate();
5821 Value *A = nullptr;
5822 bool CheckIs;
5823 if (I.isEquality()) {
5824 // (A & (A-1)) == 0 --> ctpop(A) < 2 (two commuted variants)
5825 // ((A-1) & A) != 0 --> ctpop(A) > 1 (two commuted variants)
5826 if (!match(Op0, m_OneUse(m_c_And(m_Add(m_Value(A), m_AllOnes()),
5827 m_Deferred(A)))) ||
5828 !match(Op1, m_ZeroInt()))
5829 A = nullptr;
5830
5831 // (A & -A) == A --> ctpop(A) < 2 (four commuted variants)
5832 // (-A & A) != A --> ctpop(A) > 1 (four commuted variants)
5833 if (match(Op0, m_OneUse(m_c_And(m_Neg(m_Specific(Op1)), m_Specific(Op1)))))
5834 A = Op1;
5835 else if (match(Op1,
5836 m_OneUse(m_c_And(m_Neg(m_Specific(Op0)), m_Specific(Op0)))))
5837 A = Op0;
5838
5839 CheckIs = Pred == ICmpInst::ICMP_EQ;
5840 } else if (ICmpInst::isUnsigned(Pred)) {
5841 // (A ^ (A-1)) u>= A --> ctpop(A) < 2 (two commuted variants)
5842 // ((A-1) ^ A) u< A --> ctpop(A) > 1 (two commuted variants)
5843
5844 if ((Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
5845 match(Op0, m_OneUse(m_c_Xor(m_Add(m_Specific(Op1), m_AllOnes()),
5846 m_Specific(Op1))))) {
5847 A = Op1;
5848 CheckIs = Pred == ICmpInst::ICMP_UGE;
5849 } else if ((Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE) &&
5850 match(Op1, m_OneUse(m_c_Xor(m_Add(m_Specific(Op0), m_AllOnes()),
5851 m_Specific(Op0))))) {
5852 A = Op0;
5853 CheckIs = Pred == ICmpInst::ICMP_ULE;
5854 }
5855 }
5856
5857 if (A) {
5858 Type *Ty = A->getType();
5859 CallInst *CtPop = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, A);
5860 return CheckIs ? new ICmpInst(ICmpInst::ICMP_ULT, CtPop,
5861 ConstantInt::get(Ty, 2))
5862 : new ICmpInst(ICmpInst::ICMP_UGT, CtPop,
5863 ConstantInt::get(Ty, 1));
5864 }
5865
5866 return nullptr;
5867}
5868
5869/// Find all possible pairs (BinOp, RHS) such that `BinOp V, RHS` simplifies.
5870using OffsetOp = std::pair<Instruction::BinaryOps, Value *>;
5871static void collectOffsetOp(Value *V, SmallVectorImpl<OffsetOp> &Offsets,
5872 bool AllowRecursion) {
5873 auto *Inst = dyn_cast<Instruction>(V);
5874 if (!Inst || !Inst->hasOneUse())
5875 return;
5876
5877 switch (Inst->getOpcode()) {
5878 case Instruction::Add:
5879 Offsets.emplace_back(Instruction::Sub, Inst->getOperand(1));
5880 Offsets.emplace_back(Instruction::Sub, Inst->getOperand(0));
5881 break;
5882 case Instruction::Sub:
5883 Offsets.emplace_back(Instruction::Add, Inst->getOperand(1));
5884 break;
5885 case Instruction::Xor:
5886 Offsets.emplace_back(Instruction::Xor, Inst->getOperand(1));
5887 Offsets.emplace_back(Instruction::Xor, Inst->getOperand(0));
5888 break;
5889 case Instruction::Select:
5890 if (AllowRecursion) {
5891 collectOffsetOp(Inst->getOperand(1), Offsets, /*AllowRecursion=*/false);
5892 collectOffsetOp(Inst->getOperand(2), Offsets, /*AllowRecursion=*/false);
5893 }
5894 break;
5895 default:
5896 break;
5897 }
5898}
5899
5900enum class OffsetKind { Invalid, Value, Select };
5901
5902struct OffsetResult {
5903 OffsetKind Kind;
5904 Value *V0, *V1, *V2;
5905
5906 static OffsetResult invalid() {
5907 return {OffsetKind::Invalid, nullptr, nullptr, nullptr};
5908 }
5909 static OffsetResult value(Value *V) {
5910 return {OffsetKind::Value, V, nullptr, nullptr};
5911 }
5912 static OffsetResult select(Value *Cond, Value *TrueV, Value *FalseV) {
5913 return {OffsetKind::Select, Cond, TrueV, FalseV};
5914 }
5915 bool isValid() const { return Kind != OffsetKind::Invalid; }
5916 Value *materialize(InstCombiner::BuilderTy &Builder) const {
5917 switch (Kind) {
5918 case OffsetKind::Invalid:
5919 llvm_unreachable("Invalid offset result");
5920 case OffsetKind::Value:
5921 return V0;
5922 case OffsetKind::Select:
5923 return Builder.CreateSelect(V0, V1, V2);
5924 }
5925 llvm_unreachable("Unknown OffsetKind enum");
5926 }
5927};
5928
5929/// Offset both sides of an equality icmp to see if we can save some
5930/// instructions: icmp eq/ne X, Y -> icmp eq/ne X op Z, Y op Z.
5931/// Note: This operation should not introduce poison.
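/// Example (illustrative): icmp eq (add i32 %x, %y), %y
/// offsetting both sides with "sub %y" simplifies the LHS to %x and the
/// RHS to 0, giving icmp eq i32 %x, 0.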
5932static Instruction *foldICmpEqualityWithOffset(ICmpInst &I,
5933 InstCombiner::BuilderTy &Builder,
5934 const SimplifyQuery &SQ) {
5935 assert(I.isEquality() && "Expected an equality icmp");
5936 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5937 if (!Op0->getType()->isIntOrIntVectorTy())
5938 return nullptr;
5939
5940 SmallVector<OffsetOp, 4> OffsetOps;
5941 collectOffsetOp(Op0, OffsetOps, /*AllowRecursion=*/true);
5942 collectOffsetOp(Op1, OffsetOps, /*AllowRecursion=*/true);
5943
5944 auto ApplyOffsetImpl = [&](Value *V, unsigned BinOpc, Value *RHS) -> Value * {
5945 Value *Simplified = simplifyBinOp(BinOpc, V, RHS, SQ);
5946 // Avoid infinite loops by checking if RHS is an identity for the BinOp.
5947 if (!Simplified || Simplified == V)
5948 return nullptr;
5949 // Reject constant expressions as they don't simplify things.
5950 if (isa<Constant>(Simplified) && !match(Simplified, m_ImmConstant()))
5951 return nullptr;
5952 // Check if the transformation introduces poison.
5953 return impliesPoison(RHS, V) ? Simplified : nullptr;
5954 };
5955
5956 auto ApplyOffset = [&](Value *V, unsigned BinOpc,
5957 Value *RHS) -> OffsetResult {
5958 if (auto *Sel = dyn_cast<SelectInst>(V)) {
5959 if (!Sel->hasOneUse())
5960 return OffsetResult::invalid();
5961 Value *TrueVal = ApplyOffsetImpl(Sel->getTrueValue(), BinOpc, RHS);
5962 if (!TrueVal)
5963 return OffsetResult::invalid();
5964 Value *FalseVal = ApplyOffsetImpl(Sel->getFalseValue(), BinOpc, RHS);
5965 if (!FalseVal)
5966 return OffsetResult::invalid();
5967 return OffsetResult::select(Sel->getCondition(), TrueVal, FalseVal);
5968 }
5969 if (Value *Simplified = ApplyOffsetImpl(V, BinOpc, RHS))
5970 return OffsetResult::value(Simplified);
5971 return OffsetResult::invalid();
5972 };
5973
5974 for (auto [BinOp, RHS] : OffsetOps) {
5975 auto BinOpc = static_cast<unsigned>(BinOp);
5976
5977 auto Op0Result = ApplyOffset(Op0, BinOpc, RHS);
5978 if (!Op0Result.isValid())
5979 continue;
5980 auto Op1Result = ApplyOffset(Op1, BinOpc, RHS);
5981 if (!Op1Result.isValid())
5982 continue;
5983
5984 Value *NewLHS = Op0Result.materialize(Builder);
5985 Value *NewRHS = Op1Result.materialize(Builder);
5986 return new ICmpInst(I.getPredicate(), NewLHS, NewRHS);
5987 }
5988
5989 return nullptr;
5990}
5991
5992Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
5993 if (!I.isEquality())
5994 return nullptr;
5995
5996 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5997 const CmpInst::Predicate Pred = I.getPredicate();
5998 Value *A, *B, *C, *D;
5999 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
6000 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
6001 Value *OtherVal = A == Op1 ? B : A;
6002 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
6003 }
6004
6005 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
6006 // A^c1 == C^c2 --> A == C^(c1^c2)
6007 ConstantInt *C1, *C2;
6008 if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
6009 Op1->hasOneUse()) {
6010 Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue());
6011 Value *Xor = Builder.CreateXor(C, NC);
6012 return new ICmpInst(Pred, A, Xor);
6013 }
6014
6015 // A^B == A^D -> B == D
6016 if (A == C)
6017 return new ICmpInst(Pred, B, D);
6018 if (A == D)
6019 return new ICmpInst(Pred, B, C);
6020 if (B == C)
6021 return new ICmpInst(Pred, A, D);
6022 if (B == D)
6023 return new ICmpInst(Pred, A, C);
6024 }
6025 }
6026
6027 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
6028 // A == (A^B) -> B == 0
6029 Value *OtherVal = A == Op0 ? B : A;
6030 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
6031 }
6032
6033 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
6034 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
6035 match(Op1, m_And(m_Value(C), m_Value(D)))) {
6036 Value *X = nullptr, *Y = nullptr, *Z = nullptr;
6037
6038 if (A == C) {
6039 X = B;
6040 Y = D;
6041 Z = A;
6042 } else if (A == D) {
6043 X = B;
6044 Y = C;
6045 Z = A;
6046 } else if (B == C) {
6047 X = A;
6048 Y = D;
6049 Z = B;
6050 } else if (B == D) {
6051 X = A;
6052 Y = C;
6053 Z = B;
6054 }
6055
6056 if (X) {
6057 // If X^Y is a negative power of two, then `icmp eq/ne (Z & NegP2), 0`
6058 // will fold to `icmp ult/uge Z, -NegP2`, incurring no additional
6059 // instructions.
6060 const APInt *C0, *C1;
6061 bool XorIsNegP2 = match(X, m_APInt(C0)) && match(Y, m_APInt(C1)) &&
6062 (*C0 ^ *C1).isNegatedPowerOf2();
6063
6064 // Proceed if both Op0 and Op1 are one-use, or if X^Y will constant fold
6065 // and at least one of Op0/Op1 is one-use. In those cases we are instruction neutral
6066 // but `icmp eq/ne A, 0` is easier to analyze than `icmp eq/ne A, B`.
6067 int UseCnt =
6068 int(Op0->hasOneUse()) + int(Op1->hasOneUse()) +
6069 (int(match(X, m_ImmConstant()) && match(Y, m_ImmConstant())));
6070 if (XorIsNegP2 || UseCnt >= 2) {
6071 // Build (X^Y) & Z
6072 Op1 = Builder.CreateXor(X, Y);
6073 Op1 = Builder.CreateAnd(Op1, Z);
6074 return new ICmpInst(Pred, Op1, Constant::getNullValue(Op1->getType()));
6075 }
6076 }
6077 }
6078
6079 {
6080 // Similar to above, but specialized for constant because invert is needed:
6081 // (X | C) == (Y | C) --> (X ^ Y) & ~C == 0
6082 Value *X, *Y;
6083 Constant *C;
6084 if (match(Op0, m_OneUse(m_Or(m_Value(X), m_Constant(C)))) &&
6085 match(Op1, m_OneUse(m_Or(m_Value(Y), m_Specific(C))))) {
6086 Value *Xor = Builder.CreateXor(X, Y);
6087 Value *And = Builder.CreateAnd(Xor, ConstantExpr::getNot(C));
6088 return new ICmpInst(Pred, And, Constant::getNullValue(And->getType()));
6089 }
6090 }
6091
6092 if (match(Op1, m_ZExt(m_Value(A))) &&
6093 (Op0->hasOneUse() || Op1->hasOneUse())) {
6094 // (B & (Pow2C-1)) == zext A --> A == trunc B
6095 // (B & (Pow2C-1)) != zext A --> A != trunc B
6096 const APInt *MaskC;
6097 if (match(Op0, m_And(m_Value(B), m_LowBitMask(MaskC))) &&
6098 MaskC->countr_one() == A->getType()->getScalarSizeInBits())
6099 return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType()));
6100 }
6101
6102 // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
6103 // For lshr and ashr pairs.
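 // (A and B agree in all bits at positions >= C iff A^B has only bits
 // below C set, i.e. (A ^ B) u< (1 << C).)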
6104 const APInt *AP1, *AP2;
6105 if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_APIntAllowPoison(AP1)))) &&
6106 match(Op1, m_OneUse(m_LShr(m_Value(B), m_APIntAllowPoison(AP2))))) ||
6107 (match(Op0, m_OneUse(m_AShr(m_Value(A), m_APIntAllowPoison(AP1)))) &&
6108 match(Op1, m_OneUse(m_AShr(m_Value(B), m_APIntAllowPoison(AP2)))))) {
6109 if (*AP1 != *AP2)
6110 return nullptr;
6111 unsigned TypeBits = AP1->getBitWidth();
6112 unsigned ShAmt = AP1->getLimitedValue(TypeBits);
6113 if (ShAmt < TypeBits && ShAmt != 0) {
6114 ICmpInst::Predicate NewPred =
6115 Pred == ICmpInst::ICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
6116 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
6117 APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
6118 return new ICmpInst(NewPred, Xor, ConstantInt::get(A->getType(), CmpVal));
6119 }
6120 }
6121
6122 // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
6123 ConstantInt *Cst1;
6124 if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
6125 match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
6126 unsigned TypeBits = Cst1->getBitWidth();
6127 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
6128 if (ShAmt < TypeBits && ShAmt != 0) {
6129 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
6130 APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
6131 Value *And =
6132 Builder.CreateAnd(Xor, Builder.getInt(AndVal), I.getName() + ".mask");
6133 return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
6134 }
6135 }
6136
6137 // Transform "icmp eq (trunc (lshr(X, cst1)), cst" to
6138 // "icmp (and X, mask), cst"
6139 uint64_t ShAmt = 0;
6140 if (Op0->hasOneUse() &&
6141 match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) &&
6142 match(Op1, m_ConstantInt(Cst1)) &&
6143 // Only do this when A has multiple uses. This is most important to do
6144 // when it exposes other optimizations.
6145 !A->hasOneUse()) {
6146 unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
6147
6148 if (ShAmt < ASize) {
6149 APInt MaskV =
6150 APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
6151 MaskV <<= ShAmt;
6152
6153 APInt CmpV = Cst1->getValue().zext(ASize);
6154 CmpV <<= ShAmt;
6155
6156 Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV));
6157 return new ICmpInst(Pred, Mask, Builder.getInt(CmpV));
6158 }
6159 }
6160
6161 if (Instruction *ICmp = foldICmpIntrinsicWithIntrinsic(I, Builder))
6162 return ICmp;
6163
6164 // Match icmp eq (trunc (lshr A, BW), (ashr (trunc A), BW-1)), which checks
6165 // the top BW/2 + 1 bits are all the same. Create "A >=s INT_MIN && A <=s
6166 // INT_MAX", which we generate as "icmp ult (add A, 2^(BW-1)), 2^BW" to skip a
6167 // few steps of instcombine.
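 // E.g. (illustrative) for i64 %a truncated to i32 (BW == 32): the check
 // holds iff %a is in [INT32_MIN, INT32_MAX], i.e. (add %a, 2^31) u< 2^32.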
6168 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
6169 if (match(Op0, m_AShr(m_Trunc(m_Value(A)), m_SpecificInt(BitWidth - 1))) &&
6170 match(Op1, m_Trunc(m_LShr(m_Specific(A), m_SpecificInt(BitWidth)))) &&
6171 A->getType()->getScalarSizeInBits() == BitWidth * 2 &&
6172 (I.getOperand(0)->hasOneUse() || I.getOperand(1)->hasOneUse())) {
6173 APInt C = APInt::getOneBitSet(BitWidth * 2, BitWidth - 1);
6174 Value *Add = Builder.CreateAdd(A, ConstantInt::get(A->getType(), C));
6175 return new ICmpInst(Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULT
6176 : ICmpInst::ICMP_UGE,
6177 Add, ConstantInt::get(A->getType(), C.shl(1)));
6178 }
6179
6180 // Canonicalize:
6181 // Assume B_Pow2 != 0
6182 // 1. A & B_Pow2 != B_Pow2 -> A & B_Pow2 == 0
6183 // 2. A & B_Pow2 == B_Pow2 -> A & B_Pow2 != 0
6184 if (match(Op0, m_c_And(m_Specific(Op1), m_Value())) &&
6185 isKnownToBeAPowerOfTwo(Op1, /* OrZero */ false, &I))
6186 return new ICmpInst(CmpInst::getInversePredicate(Pred), Op0,
6187 ConstantInt::getNullValue(Op0->getType()));
6188
6189 if (match(Op1, m_c_And(m_Specific(Op0), m_Value())) &&
6190 isKnownToBeAPowerOfTwo(Op0, /* OrZero */ false, &I))
6191 return new ICmpInst(CmpInst::getInversePredicate(Pred), Op1,
6192 ConstantInt::getNullValue(Op1->getType()));
6193
6194 // Canonicalize:
6195 // icmp eq/ne X, OneUse(rotate-right(X))
6196 // -> icmp eq/ne X, rotate-left(X)
6197 // We generally try to convert rotate-right -> rotate-left, this just
6198 // canonicalizes another case.
6199 if (match(&I, m_c_ICmp(m_Value(A),
6200 m_OneUse(m_Intrinsic<Intrinsic::fshr>(
6201 m_Deferred(A), m_Deferred(A), m_Value(B))))))
6202 return new ICmpInst(
6203 Pred, A,
6204 Builder.CreateIntrinsic(Op0->getType(), Intrinsic::fshl, {A, A, B}));
6205
6206 // Canonicalize:
6207 // icmp eq/ne OneUse(A ^ Cst), B --> icmp eq/ne (A ^ B), Cst
6208 Constant *Cst;
6209 if (match(&I, m_c_ICmp(m_OneUse(m_Xor(m_Value(A), m_ImmConstant(Cst))),
6210 m_CombineAnd(m_Value(B), m_Unless(m_ImmConstant())))))
6211 return new ICmpInst(Pred, Builder.CreateXor(A, B), Cst);
6212
6213 {
6214 // (icmp eq/ne (and (add/sub/xor X, P2), P2), P2)
6215 auto m_Matcher =
6216 m_CombineOr(m_CombineOr(m_c_Add(m_Value(B), m_Deferred(A)),
6217 m_c_Xor(m_Value(B), m_Deferred(A))),
6218 m_Sub(m_Value(B), m_Deferred(A)));
6219 std::optional<bool> IsZero = std::nullopt;
6220 if (match(&I, m_c_ICmp(m_OneUse(m_c_And(m_Value(A), m_Matcher)),
6221 m_Deferred(A))))
6222 IsZero = false;
6223 // (icmp eq/ne (and (add/sub/xor X, P2), P2), 0)
6224 else if (match(&I,
6225 m_ICmp(m_OneUse(m_c_And(m_Value(A), m_Matcher)), m_Zero())))
6226 IsZero = true;
6227
6228 if (IsZero && isKnownToBeAPowerOfTwo(A, /* OrZero */ true, &I))
6229 // (icmp eq/ne (and (add/sub/xor X, P2), P2), P2)
6230 // -> (icmp eq/ne (and X, P2), 0)
6231 // (icmp eq/ne (and (add/sub/xor X, P2), P2), 0)
6232 // -> (icmp eq/ne (and X, P2), P2)
6233 return new ICmpInst(Pred, Builder.CreateAnd(B, A),
6234 *IsZero ? A
6235 : ConstantInt::getNullValue(A->getType()));
6236 }
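 // [Editor's example, not in the LLVM source] Taking X = 0b1010 and the
 // power-of-two P2 = 0b0100: X ^ P2 = 0b1110, and (0b1110 & P2) == P2 holds
 // precisely because bit 2 of X was clear, i.e. (X & P2) == 0, which is the
 // rewrite performed above.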
6237
6238 if (auto *Res = foldICmpEqualityWithOffset(
6239 I, Builder, getSimplifyQuery().getWithInstruction(&I)))
6240 return Res;
6241
6242 return nullptr;
6243}
6244
6245Instruction *InstCombinerImpl::foldICmpWithTrunc(ICmpInst &ICmp) {
6246 ICmpInst::Predicate Pred = ICmp.getPredicate();
6247 Value *Op0 = ICmp.getOperand(0), *Op1 = ICmp.getOperand(1);
6248
6249 // Try to canonicalize trunc + compare-to-constant into a mask + cmp.
6250 // The trunc masks high bits while the compare may effectively mask low bits.
6251 Value *X;
6252 const APInt *C;
6253 if (!match(Op0, m_OneUse(m_Trunc(m_Value(X)))) || !match(Op1, m_APInt(C)))
6254 return nullptr;
6255
6256 // This matches patterns corresponding to tests of the signbit as well as:
6257 // (trunc X) pred C2 --> (X & Mask) == C
6258 if (auto Res = decomposeBitTestICmp(Op0, Op1, Pred, /*WithTrunc=*/true,
6259 /*AllowNonZeroC=*/true)) {
6260 Value *And = Builder.CreateAnd(Res->X, Res->Mask);
6261 Constant *C = ConstantInt::get(Res->X->getType(), Res->C);
6262 return new ICmpInst(Res->Pred, And, C);
6263 }
6264
6265 unsigned SrcBits = X->getType()->getScalarSizeInBits();
6266 if (auto *II = dyn_cast<IntrinsicInst>(X)) {
6267 if (II->getIntrinsicID() == Intrinsic::cttz ||
6268 II->getIntrinsicID() == Intrinsic::ctlz) {
6269 unsigned MaxRet = SrcBits;
6270 // If the "is_zero_poison" argument is set, then we know at least
6271 // one bit is set in the input, so the result is always at least one
6272 // less than the full bitwidth of that input.
6273 if (match(II->getArgOperand(1), m_One()))
6274 MaxRet--;
6275
6276 // Make sure the destination is wide enough to hold the largest output of
6277 // the intrinsic.
6278 if (llvm::Log2_32(MaxRet) + 1 <= Op0->getType()->getScalarSizeInBits())
6279 if (Instruction *I =
6280 foldICmpIntrinsicWithConstant(ICmp, II, C->zext(SrcBits)))
6281 return I;
6282 }
6283 }
6284
6285 return nullptr;
6286}
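// [Editor's sketch, not part of the LLVM source] A standalone check of the
// trunc-vs-mask equivalence that the function above relies on, written with
// plain C++ integers standing in for IR values; the helper name is ours.
constexpr bool truncEqViaMask(unsigned X) {
  // trunc i32 -> i8 compared against 5, versus masking the low byte.
  return ((unsigned char)X == 5) == ((X & 0xFFu) == 5u);
}
static_assert(truncEqViaMask(5u) && truncEqViaMask(0x105u) && truncEqViaMask(7u),
              "trunc(X) == C is equivalent to (X & Mask) == zext(C)");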
6287
6288Instruction *InstCombinerImpl::foldICmpWithZextOrSext(ICmpInst &ICmp) {
6289 assert(isa<CastInst>(ICmp.getOperand(0)) && "Expected cast for operand 0");
6290 auto *CastOp0 = cast<CastInst>(ICmp.getOperand(0));
6291 Value *X;
6292 if (!match(CastOp0, m_ZExtOrSExt(m_Value(X))))
6293 return nullptr;
6294
6295 bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
6296 bool IsSignedCmp = ICmp.isSigned();
6297
6298 // icmp Pred (ext X), (ext Y)
6299 Value *Y;
6300 if (match(ICmp.getOperand(1), m_ZExtOrSExt(m_Value(Y)))) {
6301 bool IsZext0 = isa<ZExtInst>(ICmp.getOperand(0));
6302 bool IsZext1 = isa<ZExtInst>(ICmp.getOperand(1));
6303
6304 if (IsZext0 != IsZext1) {
6305 // If X and Y are both i1:
6306 // (icmp eq/ne (zext X), (sext Y))
6307 // eq -> (icmp eq (or X, Y), 0)
6308 // ne -> (icmp ne (or X, Y), 0)
6309 if (ICmp.isEquality() && X->getType()->isIntOrIntVectorTy(1) &&
6310 Y->getType()->isIntOrIntVectorTy(1))
6311 return new ICmpInst(ICmp.getPredicate(), Builder.CreateOr(X, Y),
6312 Constant::getNullValue(X->getType()));
6313
6314 // If we have mismatched casts and zext has the nneg flag, we can
6315 // treat the "zext nneg" as "sext". Otherwise, we cannot fold and quit.
6316
6317 auto *NonNegInst0 = dyn_cast<PossiblyNonNegInst>(ICmp.getOperand(0));
6318 auto *NonNegInst1 = dyn_cast<PossiblyNonNegInst>(ICmp.getOperand(1));
6319
6320 bool IsNonNeg0 = NonNegInst0 && NonNegInst0->hasNonNeg();
6321 bool IsNonNeg1 = NonNegInst1 && NonNegInst1->hasNonNeg();
6322
6323 if ((IsZext0 && IsNonNeg0) || (IsZext1 && IsNonNeg1))
6324 IsSignedExt = true;
6325 else
6326 return nullptr;
6327 }
6328
6329 // Not an extension from the same type?
6330 Type *XTy = X->getType(), *YTy = Y->getType();
6331 if (XTy != YTy) {
6332 // One of the casts must have one use because we are creating a new cast.
6333 if (!ICmp.getOperand(0)->hasOneUse() && !ICmp.getOperand(1)->hasOneUse())
6334 return nullptr;
6335 // Extend the narrower operand to the type of the wider operand.
6336 CastInst::CastOps CastOpcode =
6337 IsSignedExt ? Instruction::SExt : Instruction::ZExt;
6338 if (XTy->getScalarSizeInBits() < YTy->getScalarSizeInBits())
6339 X = Builder.CreateCast(CastOpcode, X, YTy);
6340 else if (YTy->getScalarSizeInBits() < XTy->getScalarSizeInBits())
6341 Y = Builder.CreateCast(CastOpcode, Y, XTy);
6342 else
6343 return nullptr;
6344 }
6345
6346 // (zext X) == (zext Y) --> X == Y
6347 // (sext X) == (sext Y) --> X == Y
6348 if (ICmp.isEquality())
6349 return new ICmpInst(ICmp.getPredicate(), X, Y);
6350
6351 // A signed comparison of sign extended values simplifies into a
6352 // signed comparison.
6353 if (IsSignedCmp && IsSignedExt)
6354 return new ICmpInst(ICmp.getPredicate(), X, Y);
6355
6356 // The other three cases all fold into an unsigned comparison.
6357 return new ICmpInst(ICmp.getUnsignedPredicate(), X, Y);
6358 }
6359
6360 // Below here, we are only folding a compare with constant.
6361 auto *C = dyn_cast<Constant>(ICmp.getOperand(1));
6362 if (!C)
6363 return nullptr;
6364
6365 // If a lossless truncate is possible...
6366 Type *SrcTy = CastOp0->getSrcTy();
6367 Constant *Res = getLosslessInvCast(C, SrcTy, CastOp0->getOpcode(), DL);
6368 if (Res) {
6369 if (ICmp.isEquality())
6370 return new ICmpInst(ICmp.getPredicate(), X, Res);
6371
6372 // A signed comparison of sign extended values simplifies into a
6373 // signed comparison.
6374 if (IsSignedExt && IsSignedCmp)
6375 return new ICmpInst(ICmp.getPredicate(), X, Res);
6376
6377 // The other three cases all fold into an unsigned comparison.
6378 return new ICmpInst(ICmp.getUnsignedPredicate(), X, Res);
6379 }
6380
6381 // The re-extended constant changed, partly changed (in the case of a vector),
6382 // or could not be determined to be equal (in the case of a constant
6383 // expression), so the constant cannot be represented in the shorter type.
6384 // All the cases that fold to true or false will have already been handled
6385 // by simplifyICmpInst, so only deal with the tricky case.
6386 if (IsSignedCmp || !IsSignedExt || !isa<ConstantInt>(C))
6387 return nullptr;
6388
6389 // Is source op positive?
6390 // icmp ult (sext X), C --> icmp sgt X, -1
6391 if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
6392 return new ICmpInst(ICmpInst::ICMP_SGT, X,
6393 Constant::getAllOnesValue(X->getType()));
6394 // Is source op negative?
6395 // icmp ugt (sext X), C --> icmp slt X, 0
6396 assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
6397 return new ICmpInst(ICmpInst::ICMP_SLT, X, Constant::getNullValue(X->getType()));
6398}
6399
6400/// Handle icmp (cast x), (cast or constant).
6401Instruction *InstCombinerImpl::foldICmpWithCastOp(ICmpInst &ICmp) {
6402 // If any operand of ICmp is an inttoptr roundtrip cast, then remove it,
6403 // since icmp compares only the pointer's value.
6404 // icmp (inttoptr (ptrtoint p1)), p2 --> icmp p1, p2.
6405 Value *SimplifiedOp0 = simplifyIntToPtrRoundTripCast(ICmp.getOperand(0));
6406 Value *SimplifiedOp1 = simplifyIntToPtrRoundTripCast(ICmp.getOperand(1));
6407 if (SimplifiedOp0 || SimplifiedOp1)
6408 return new ICmpInst(ICmp.getPredicate(),
6409 SimplifiedOp0 ? SimplifiedOp0 : ICmp.getOperand(0),
6410 SimplifiedOp1 ? SimplifiedOp1 : ICmp.getOperand(1));
6411
6412 auto *CastOp0 = dyn_cast<CastInst>(ICmp.getOperand(0));
6413 if (!CastOp0)
6414 return nullptr;
6415 if (!isa<Constant>(ICmp.getOperand(1)) && !isa<CastInst>(ICmp.getOperand(1)))
6416 return nullptr;
6417
6418 Value *Op0Src = CastOp0->getOperand(0);
6419 Type *SrcTy = CastOp0->getSrcTy();
6420 Type *DestTy = CastOp0->getDestTy();
6421
6422 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
6423 // integer type is the same size as the pointer type.
6424 auto CompatibleSizes = [&](Type *PtrTy, Type *IntTy) {
6425 if (isa<VectorType>(PtrTy)) {
6426 PtrTy = cast<VectorType>(PtrTy)->getElementType();
6427 IntTy = cast<VectorType>(IntTy)->getElementType();
6428 }
6429 return DL.getPointerTypeSizeInBits(PtrTy) == IntTy->getIntegerBitWidth();
6430 };
6431 if (CastOp0->getOpcode() == Instruction::PtrToInt &&
6432 CompatibleSizes(SrcTy, DestTy)) {
6433 Value *NewOp1 = nullptr;
6434 if (auto *PtrToIntOp1 = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) {
6435 Value *PtrSrc = PtrToIntOp1->getOperand(0);
6436 if (PtrSrc->getType() == Op0Src->getType())
6437 NewOp1 = PtrToIntOp1->getOperand(0);
6438 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
6439 NewOp1 = ConstantExpr::getIntToPtr(RHSC, SrcTy);
6440 }
6441
6442 if (NewOp1)
6443 return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
6444 }
6445
6446 // Do the same in the other direction for icmp (inttoptr x), (inttoptr/c).
6447 if (CastOp0->getOpcode() == Instruction::IntToPtr &&
6448 CompatibleSizes(DestTy, SrcTy)) {
6449 Value *NewOp1 = nullptr;
6450 if (auto *IntToPtrOp1 = dyn_cast<IntToPtrInst>(ICmp.getOperand(1))) {
6451 Value *IntSrc = IntToPtrOp1->getOperand(0);
6452 if (IntSrc->getType() == Op0Src->getType())
6453 NewOp1 = IntToPtrOp1->getOperand(0);
6454 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
6455 NewOp1 = ConstantFoldConstant(ConstantExpr::getPtrToInt(RHSC, SrcTy), DL);
6456 }
6457
6458 if (NewOp1)
6459 return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
6460 }
6461
6462 if (Instruction *R = foldICmpWithTrunc(ICmp))
6463 return R;
6464
6465 return foldICmpWithZextOrSext(ICmp);
6466}
6467
6468static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS,
6469 bool IsSigned) {
6470 switch (BinaryOp) {
6471 default:
6472 llvm_unreachable("Unsupported binary op");
6473 case Instruction::Add:
6474 case Instruction::Sub:
6475 return match(RHS, m_Zero());
6476 case Instruction::Mul:
6477 return !(RHS->getType()->isIntOrIntVectorTy(1) && IsSigned) &&
6478 match(RHS, m_One());
6479 }
6480}
6481
6482OverflowResult
6483InstCombinerImpl::computeOverflow(Instruction::BinaryOps BinaryOp,
6484 bool IsSigned, Value *LHS, Value *RHS,
6485 Instruction *CxtI) const {
6486 switch (BinaryOp) {
6487 default:
6488 llvm_unreachable("Unsupported binary op");
6489 case Instruction::Add:
6490 if (IsSigned)
6491 return computeOverflowForSignedAdd(LHS, RHS, CxtI);
6492 else
6493 return computeOverflowForUnsignedAdd(LHS, RHS, CxtI);
6494 case Instruction::Sub:
6495 if (IsSigned)
6496 return computeOverflowForSignedSub(LHS, RHS, CxtI);
6497 else
6498 return computeOverflowForUnsignedSub(LHS, RHS, CxtI);
6499 case Instruction::Mul:
6500 if (IsSigned)
6501 return computeOverflowForSignedMul(LHS, RHS, CxtI);
6502 else
6503 return computeOverflowForUnsignedMul(LHS, RHS, CxtI);
6504 }
6505}
6506
6507bool InstCombinerImpl::OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp,
6508 bool IsSigned, Value *LHS,
6509 Value *RHS, Instruction &OrigI,
6510 Value *&Result,
6511 Constant *&Overflow) {
6512 if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
6513 std::swap(LHS, RHS);
6514
6515 // If the overflow check was an add followed by a compare, the insertion point
6516 // may be pointing to the compare. We want to insert the new instructions
6517 // before the add in case there are uses of the add between the add and the
6518 // compare.
6519 Builder.SetInsertPoint(&OrigI);
6520
6521 Type *OverflowTy = Type::getInt1Ty(LHS->getContext());
6522 if (auto *LHSTy = dyn_cast<VectorType>(LHS->getType()))
6523 OverflowTy = VectorType::get(OverflowTy, LHSTy->getElementCount());
6524
6525 if (isNeutralValue(BinaryOp, RHS, IsSigned)) {
6526 Result = LHS;
6527 Overflow = ConstantInt::getFalse(OverflowTy);
6528 return true;
6529 }
6530
6531 switch (computeOverflow(BinaryOp, IsSigned, LHS, RHS, &OrigI)) {
6532 case OverflowResult::MayOverflow:
6533 return false;
6534 case OverflowResult::AlwaysOverflowsLow:
6535 case OverflowResult::AlwaysOverflowsHigh:
6536 Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
6537 Result->takeName(&OrigI);
6538 Overflow = ConstantInt::getTrue(OverflowTy);
6539 return true;
6540 case OverflowResult::NeverOverflows:
6541 Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
6542 Result->takeName(&OrigI);
6543 Overflow = ConstantInt::getFalse(OverflowTy);
6544 if (auto *Inst = dyn_cast<Instruction>(Result)) {
6545 if (IsSigned)
6546 Inst->setHasNoSignedWrap();
6547 else
6548 Inst->setHasNoUnsignedWrap();
6549 }
6550 return true;
6551 }
6552
6553 llvm_unreachable("Unexpected overflow result");
6554}
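// [Editor's sketch, not part of the LLVM source] The unsigned-subtract case
// handled by the overflow machinery above reduces to one comparison: an
// unsigned A - B wraps exactly when A u< B. The helper name is ours.
constexpr bool usubOverflows(unsigned A, unsigned B) { return A < B; }
static_assert(!usubOverflows(5u, 3u) && usubOverflows(3u, 5u),
              "the usub overflow bit is exactly (A u< B)");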
6555
6556/// Recognize and process an idiom involving a test for multiplication
6557/// overflow.
6558///
6559/// The caller has matched a pattern of the form:
6560///   I = cmp u (mul(zext A, zext B)), V
6561/// The function checks if this is a test for overflow and if so replaces
6562/// multiplication with call to 'mul.with.overflow' intrinsic.
6563///
6564/// \param I Compare instruction.
6565/// \param MulVal Result of the 'mul' instruction. It is one of the arguments of
6566/// the compare instruction. Must be of integer type.
6567/// \param OtherVal The other argument of compare instruction.
6568/// \returns Instruction which must replace the compare instruction, NULL if no
6569/// replacement required.
6570static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
6571 const APInt *OtherVal,
6572 InstCombinerImpl &IC) {
6573 // Don't bother doing this transformation for pointers, and don't do it for
6574 // vectors.
6575 if (!isa<IntegerType>(MulVal->getType()))
6576 return nullptr;
6577
6578 auto *MulInstr = dyn_cast<Instruction>(MulVal);
6579 if (!MulInstr)
6580 return nullptr;
6581 assert(MulInstr->getOpcode() == Instruction::Mul);
6582
6583 auto *LHS = cast<ZExtInst>(MulInstr->getOperand(0)),
6584 *RHS = cast<ZExtInst>(MulInstr->getOperand(1));
6585 assert(LHS->getOpcode() == Instruction::ZExt);
6586 assert(RHS->getOpcode() == Instruction::ZExt);
6587 Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
6588
6589 // Calculate type and width of the result produced by mul.with.overflow.
6590 Type *TyA = A->getType(), *TyB = B->getType();
6591 unsigned WidthA = TyA->getPrimitiveSizeInBits(),
6592 WidthB = TyB->getPrimitiveSizeInBits();
6593 unsigned MulWidth;
6594 Type *MulType;
6595 if (WidthB > WidthA) {
6596 MulWidth = WidthB;
6597 MulType = TyB;
6598 } else {
6599 MulWidth = WidthA;
6600 MulType = TyA;
6601 }
6602
6603 // In order to replace the original mul with a narrower mul.with.overflow,
6604 // all uses must ignore upper bits of the product. The number of used low
6605 // bits must not be greater than the width of mul.with.overflow.
6606 if (MulVal->hasNUsesOrMore(2))
6607 for (User *U : MulVal->users()) {
6608 if (U == &I)
6609 continue;
6610 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
6611 // Check if truncation ignores bits above MulWidth.
6612 unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
6613 if (TruncWidth > MulWidth)
6614 return nullptr;
6615 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
6616 // Check if AND ignores bits above MulWidth.
6617 if (BO->getOpcode() != Instruction::And)
6618 return nullptr;
6619 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
6620 const APInt &CVal = CI->getValue();
6621 if (CVal.getBitWidth() - CVal.countl_zero() > MulWidth)
6622 return nullptr;
6623 } else {
6624 // In this case we could have the operand of the binary operation
6625 // being defined in another block, and performing the replacement
6626 // could break the dominance relation.
6627 return nullptr;
6628 }
6629 } else {
6630 // Other uses prohibit this transformation.
6631 return nullptr;
6632 }
6633 }
6634
6635 // Recognize patterns
6636 switch (I.getPredicate()) {
6637 case ICmpInst::ICMP_UGT: {
6638 // Recognize pattern:
6639 // mulval = mul(zext A, zext B)
6640 // cmp ugt mulval, max
6641 APInt MaxVal = APInt::getMaxValue(MulWidth);
6642 MaxVal = MaxVal.zext(OtherVal->getBitWidth());
6643 if (MaxVal.eq(*OtherVal))
6644 break; // Recognized
6645 return nullptr;
6646 }
6647
6648 case ICmpInst::ICMP_ULT: {
6649 // Recognize pattern:
6650 // mulval = mul(zext A, zext B)
6651 // cmp ult mulval, max + 1
6652 APInt MaxVal = APInt::getOneBitSet(OtherVal->getBitWidth(), MulWidth);
6653 if (MaxVal.eq(*OtherVal))
6654 break; // Recognized
6655 return nullptr;
6656 }
6657
6658 default:
6659 return nullptr;
6660 }
6661
6662 InstCombiner::BuilderTy &Builder = IC.Builder;
6663 Builder.SetInsertPoint(MulInstr);
6664
6665 // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
6666 Value *MulA = A, *MulB = B;
6667 if (WidthA < MulWidth)
6668 MulA = Builder.CreateZExt(A, MulType);
6669 if (WidthB < MulWidth)
6670 MulB = Builder.CreateZExt(B, MulType);
6671 CallInst *Call =
6672 Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, MulType,
6673 {MulA, MulB}, /*FMFSource=*/nullptr, "umul");
6674 IC.addToWorklist(MulInstr);
6675
6676 // If there are uses of mul result other than the comparison, we know that
6677 // they are truncations or binary ANDs. Change them to use the result of
6678 // mul.with.overflow and properly adjust the mask/size.
6679 if (MulVal->hasNUsesOrMore(2)) {
6680 Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");
6681 for (User *U : make_early_inc_range(MulVal->users())) {
6682 if (U == &I)
6683 continue;
6684 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
6685 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
6686 IC.replaceInstUsesWith(*TI, Mul);
6687 else
6688 TI->setOperand(0, Mul);
6689 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
6690 assert(BO->getOpcode() == Instruction::And);
6691 // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
6692 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
6693 APInt ShortMask = CI->getValue().trunc(MulWidth);
6694 Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
6695 Value *Zext = Builder.CreateZExt(ShortAnd, BO->getType());
6696 IC.replaceInstUsesWith(*BO, Zext);
6697 } else {
6698 llvm_unreachable("Unexpected Binary operation");
6699 }
6700 IC.addToWorklist(cast<Instruction>(U));
6701 }
6702 }
6703
6704 // The original icmp gets replaced with the overflow value, maybe inverted
6705 // depending on predicate.
6706 if (I.getPredicate() == ICmpInst::ICMP_ULT) {
6707 Value *Res = Builder.CreateExtractValue(Call, 1);
6708 return BinaryOperator::CreateNot(Res);
6709 }
6710
6711 return ExtractValueInst::Create(Call, 1);
6712}
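// [Editor's sketch, not part of the LLVM source] The idiom recognized above,
// replayed with 8-bit operands and a wide multiply: comparing the zext'ed
// product against the narrow type's max is exactly the overflow bit of an
// 8-bit unsigned multiply. The helper name is ours.
constexpr bool umul8Overflows(unsigned char A, unsigned char B) {
  return (unsigned)A * (unsigned)B > 0xFFu; // mul(zext A, zext B) ugt max
}
static_assert(umul8Overflows(16, 16) && !umul8Overflows(15, 17),
              "16 * 16 == 256 overflows i8; 15 * 17 == 255 does not");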
6713
6714/// When performing a comparison against a constant, it is possible that not all
6715/// the bits in the LHS are demanded. This helper method computes the mask that
6716/// IS demanded.
6717static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) {
6718 const APInt *RHS;
6719 if (!match(I.getOperand(1), m_APInt(RHS)))
6720 return APInt::getAllOnes(BitWidth);
6721
6722 // If this is a normal comparison, it demands all bits. If it is a sign bit
6723 // comparison, it only demands the sign bit.
6724 bool UnusedBit;
6725 if (isSignBitCheck(I.getPredicate(), *RHS, UnusedBit))
6726 return APInt::getSignMask(BitWidth);
6727
6728 switch (I.getPredicate()) {
6729 // For a UGT comparison, we don't care about any bits that
6730 // correspond to the trailing ones of the comparand. The value of these
6731 // bits doesn't impact the outcome of the comparison, because any value
6732 // greater than the RHS must differ in a bit higher than these due to carry.
6733 case ICmpInst::ICMP_UGT:
6734 return APInt::getBitsSetFrom(BitWidth, RHS->countr_one());
6735
6736 // Similarly, for a ULT comparison, we don't care about the trailing zeros.
6737 // Any value less than the RHS must differ in a higher bit because of carries.
6738 case ICmpInst::ICMP_ULT:
6739 return APInt::getBitsSetFrom(BitWidth, RHS->countr_zero());
6740
6741 default:
6742 return APInt::getAllOnes(BitWidth);
6743 }
6744}
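// [Editor's sketch, not part of the LLVM source] Spot-checking the ugt rule
// above: for RHS == 7 (three trailing ones), the low three bits of the LHS
// can never change the outcome, so they are not demanded. The helper name is
// ours.
constexpr bool ugtIgnoresTrailingOnes(unsigned X) {
  return (X > 7u) == ((X | 7u) > 7u);
}
static_assert(ugtIgnoresTrailingOnes(0u) && ugtIgnoresTrailingOnes(7u) &&
                  ugtIgnoresTrailingOnes(8u) && ugtIgnoresTrailingOnes(13u),
              "bits below the trailing ones of RHS are not demanded by ugt");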
6745
6746/// Check that one use is in the same block as the definition and all
6747/// other uses are in blocks dominated by a given block.
6748///
6749/// \param DI Definition
6750/// \param UI Use
6751/// \param DB Block that must dominate all uses of \p DI outside
6752/// the parent block
6753/// \return true when \p UI is the only use of \p DI in the parent block
6754/// and all other uses of \p DI are in blocks dominated by \p DB.
6755///
6756bool InstCombinerImpl::dominatesAllUses(const Instruction *DI,
6757 const Instruction *UI,
6758 const BasicBlock *DB) const {
6759 assert(DI && UI && "Instruction not defined\n");
6760 // Ignore incomplete definitions.
6761 if (!DI->getParent())
6762 return false;
6763 // DI and UI must be in the same block.
6764 if (DI->getParent() != UI->getParent())
6765 return false;
6766 // Protect from self-referencing blocks.
6767 if (DI->getParent() == DB)
6768 return false;
6769 for (const User *U : DI->users()) {
6770 auto *Usr = cast<Instruction>(U);
6771 if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
6772 return false;
6773 }
6774 return true;
6775}
6776
6777/// Return true when the instruction sequence within a block is select-cmp-br.
6778static bool isChainSelectCmpBranch(const SelectInst *SI) {
6779 const BasicBlock *BB = SI->getParent();
6780 if (!BB)
6781 return false;
6782 const BranchInst *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
6783 if (!BI || BI->getNumSuccessors() != 2)
6784 return false;
6785 auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
6786 if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
6787 return false;
6788 return true;
6789}
6790
6791/// True when a select result is replaced by one of its operands
6792/// in select-icmp sequence. This will eventually result in the elimination
6793/// of the select.
6794///
6795/// \param SI Select instruction
6796/// \param Icmp Compare instruction
6797/// \param SIOpd Operand that replaces the select
6798///
6799/// Notes:
6800/// - The replacement is global and requires dominator information
6801/// - The caller is responsible for the actual replacement
6802///
6803/// Example:
6804///
6805/// entry:
6806/// %4 = select i1 %3, %C* %0, %C* null
6807/// %5 = icmp eq %C* %4, null
6808/// br i1 %5, label %9, label %7
6809/// ...
6810/// ; <label>:7 ; preds = %entry
6811/// %8 = getelementptr inbounds %C* %4, i64 0, i32 0
6812/// ...
6813///
6814/// can be transformed to
6815///
6816/// %5 = icmp eq %C* %0, null
6817/// %6 = select i1 %3, i1 %5, i1 true
6818/// br i1 %6, label %9, label %7
6819/// ...
6820/// ; <label>:7 ; preds = %entry
6821/// %8 = getelementptr inbounds %C* %0, i64 0, i32 0 // replace by %0!
6822///
6823/// The same applies when the first operand of the select is a constant and/or
6824/// the compare is for not-equal rather than equal.
6825///
6826/// NOTE: The function is only called when the select and compare constants
6827/// are equal, the optimization can work only for EQ predicates. This is not a
6828/// major restriction since a NE compare should be 'normalized' to an equal
6829/// compare, which usually happens in the combiner and test case
6830/// select-cmp-br.ll checks for it.
6831bool InstCombinerImpl::replacedSelectWithOperand(SelectInst *SI,
6832 const ICmpInst *Icmp,
6833 const unsigned SIOpd) {
6834 assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
6835 if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
6836 BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
6837 // The check for the single predecessor is not the best that can be
6838 // done. But it protects efficiently against cases like when SI's
6839 // home block has two successors, Succ and Succ1, and Succ1 is a predecessor
6840 // of Succ. Then SI can't be replaced by SIOpd because the use that gets
6841 // replaced can be reached on either path. So the uniqueness check
6842 // guarantees that the path all uses of SI (outside SI's parent) are on
6843 // is disjoint from all other paths out of SI. But that information
6844 // is more expensive to compute, and the trade-off here is in favor
6845 // of compile-time. Note also that we check for a single predecessor and
6846 // not just uniqueness; this handles the situation when Succ and Succ1
6847 // point to the same basic block.
6848 if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
6849 NumSel++;
6850 SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
6851 return true;
6852 }
6853 }
6854 return false;
6855}
6856
6857/// Try to fold the comparison based on range information we can get by checking
6858/// whether bits are known to be zero or one in the inputs.
6859Instruction *InstCombinerImpl::foldICmpUsingKnownBits(ICmpInst &I) {
6860 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6861 Type *Ty = Op0->getType();
6862 ICmpInst::Predicate Pred = I.getPredicate();
6863
6864 // Get scalar or pointer size.
6865 unsigned BitWidth = Ty->isIntOrIntVectorTy()
6866 ? Ty->getScalarSizeInBits()
6867 : DL.getPointerTypeSizeInBits(Ty->getScalarType());
6868
6869 if (!BitWidth)
6870 return nullptr;
6871
6872 KnownBits Op0Known(BitWidth);
6873 KnownBits Op1Known(BitWidth);
6874
6875 {
6876 // Don't use dominating conditions when folding icmp using known bits. This
6877 // may convert signed into unsigned predicates in ways that other passes
6878 // (especially IndVarSimplify) may not be able to reliably undo.
6879 SimplifyQuery Q = SQ.getWithoutDomCondCache().getWithInstruction(&I);
6880 if (SimplifyDemandedBits(&I, 0, getDemandedBitsLHSMask(I, BitWidth),
6881 Op0Known, Q))
6882 return &I;
6883
6884 if (SimplifyDemandedBits(&I, 1, APInt::getAllOnes(BitWidth), Op1Known, Q))
6885 return &I;
6886 }
6887
6888 if (!isa<Constant>(Op0) && Op0Known.isConstant())
6889 return new ICmpInst(
6890 Pred, ConstantExpr::getIntegerValue(Ty, Op0Known.getConstant()), Op1);
6891 if (!isa<Constant>(Op1) && Op1Known.isConstant())
6892 return new ICmpInst(
6893 Pred, Op0, ConstantExpr::getIntegerValue(Ty, Op1Known.getConstant()));
6894
6895 if (std::optional<bool> Res = ICmpInst::compare(Op0Known, Op1Known, Pred))
6896 return replaceInstUsesWith(I, ConstantInt::getBool(I.getType(), *Res));
6897
6898 // Given the known and unknown bits, compute a range that the LHS could be
6899 // in. Compute the Min, Max and RHS values based on the known bits. For the
6900 // EQ and NE we use unsigned values.
6901 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
6902 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
6903 if (I.isSigned()) {
6904 Op0Min = Op0Known.getSignedMinValue();
6905 Op0Max = Op0Known.getSignedMaxValue();
6906 Op1Min = Op1Known.getSignedMinValue();
6907 Op1Max = Op1Known.getSignedMaxValue();
6908 } else {
6909 Op0Min = Op0Known.getMinValue();
6910 Op0Max = Op0Known.getMaxValue();
6911 Op1Min = Op1Known.getMinValue();
6912 Op1Max = Op1Known.getMaxValue();
6913 }
6914
6915 // Don't break up a clamp pattern -- (min(max X, Y), Z) -- by replacing a
6916 // min/max canonical compare with some other compare. That could lead to
6917 // conflict with select canonicalization and infinite looping.
6918 // FIXME: This constraint may go away if min/max intrinsics are canonical.
6919 auto isMinMaxCmp = [&](Instruction &Cmp) {
6920 if (!Cmp.hasOneUse())
6921 return false;
6922 Value *A, *B;
6923 SelectPatternFlavor SPF = matchSelectPattern(Cmp.user_back(), A, B).Flavor;
6924 if (!SelectPatternResult::isMinOrMax(SPF))
6925 return false;
6926 return match(Op0, m_MaxOrMin(m_Value(), m_Value())) ||
6927 match(Op1, m_MaxOrMin(m_Value(), m_Value()));
6928 };
6929 if (!isMinMaxCmp(I)) {
6930 switch (Pred) {
6931 default:
6932 break;
6933 case ICmpInst::ICMP_ULT: {
6934 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
6935 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6936 const APInt *CmpC;
6937 if (match(Op1, m_APInt(CmpC))) {
6938 // A <u C -> A == C-1 if min(A)+1 == C
6939 if (*CmpC == Op0Min + 1)
6940 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6941 ConstantInt::get(Op1->getType(), *CmpC - 1));
6942 // X <u C --> X == 0, if the number of zero bits in the bottom of X
6943 // exceeds the log2 of C.
6944 if (Op0Known.countMinTrailingZeros() >= CmpC->ceilLogBase2())
6945 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6946 Constant::getNullValue(Op1->getType()));
6947 }
6948 break;
6949 }
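 // [Editor's example, not in the LLVM source] For the trailing-zeros fold in
 // the ULT case above: if X is known to be a multiple of 8 (at least three
 // trailing zero bits) and C == 6 (ceil(log2(6)) == 3), the only such value
 // below 6 is 0, so "X u< 6" becomes "X == 0".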
6950 case ICmpInst::ICMP_UGT: {
6951 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
6952 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6953 const APInt *CmpC;
6954 if (match(Op1, m_APInt(CmpC))) {
6955 // A >u C -> A == C+1 if max(a)-1 == C
6956 if (*CmpC == Op0Max - 1)
6957 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6958 ConstantInt::get(Op1->getType(), *CmpC + 1));
6959 // X >u C --> X != 0, if the number of zero bits in the bottom of X
6960 // exceeds the log2 of C.
6961 if (Op0Known.countMinTrailingZeros() >= CmpC->getActiveBits())
6962 return new ICmpInst(ICmpInst::ICMP_NE, Op0,
6963 Constant::getNullValue(Op1->getType()));
6964 }
6965 break;
6966 }
6967 case ICmpInst::ICMP_SLT: {
6968 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
6969 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6970 const APInt *CmpC;
6971 if (match(Op1, m_APInt(CmpC))) {
6972 if (*CmpC == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
6973 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6974 ConstantInt::get(Op1->getType(), *CmpC - 1));
6975 }
6976 break;
6977 }
6978 case ICmpInst::ICMP_SGT: {
6979 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
6980 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6981 const APInt *CmpC;
6982 if (match(Op1, m_APInt(CmpC))) {
6983 if (*CmpC == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
6984 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6985 ConstantInt::get(Op1->getType(), *CmpC + 1));
6986 }
6987 break;
6988 }
6989 }
6990 }
6991
6992 // Based on the range information we know about the LHS, see if we can
6993 // simplify this comparison. For example, (x&4) < 8 is always true.
6994 switch (Pred) {
6995 default:
6996 break;
6997 case ICmpInst::ICMP_EQ:
6998 case ICmpInst::ICMP_NE: {
6999 // If all bits are known zero except for one, then we know at most one bit
7000 // is set. If the comparison is against zero, then this is a check to see if
7001 // *that* bit is set.
7002 APInt Op0KnownZeroInverted = ~Op0Known.Zero;
7003 if (Op1Known.isZero()) {
7004 // If the LHS is an AND with the same constant, look through it.
7005 Value *LHS = nullptr;
7006 const APInt *LHSC;
7007 if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) ||
7008 *LHSC != Op0KnownZeroInverted)
7009 LHS = Op0;
7010
7011 Value *X;
7012 const APInt *C1;
7013 if (match(LHS, m_Shl(m_Power2(C1), m_Value(X)))) {
7014 Type *XTy = X->getType();
7015 unsigned Log2C1 = C1->countr_zero();
7016 APInt C2 = Op0KnownZeroInverted;
7017 APInt C2Pow2 = (C2 & ~(*C1 - 1)) + *C1;
7018 if (C2Pow2.isPowerOf2()) {
7019 // iff C1 is a power of 2 and ((C2 & ~(C1 - 1)) + C1) is a power of 2:
7020 // ((C1 << X) & C2) == 0 -> X >= (Log2(C2+C1) - Log2(C1))
7021 // ((C1 << X) & C2) != 0 -> X < (Log2(C2+C1) - Log2(C1))
7022 unsigned Log2C2 = C2Pow2.countr_zero();
7023 auto *CmpC = ConstantInt::get(XTy, Log2C2 - Log2C1);
7024 auto NewPred =
7025 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
7026 return new ICmpInst(NewPred, X, CmpC);
7027 }
7028 }
7029 }
7030
7031 // Op0 eq C_Pow2 -> Op0 ne 0 if Op0 is known to be C_Pow2 or zero.
7032 if (Op1Known.isConstant() && Op1Known.getConstant().isPowerOf2() &&
7033 (Op0Known & Op1Known) == Op0Known)
7034 return new ICmpInst(CmpInst::getInversePredicate(Pred), Op0,
7035 ConstantInt::getNullValue(Op1->getType()));
7036 break;
7037 }
7038 case ICmpInst::ICMP_SGE:
7039 if (Op1Min == Op0Max) // A >=s B -> A == B if max(A) == min(B)
7040 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7041 break;
7042 case ICmpInst::ICMP_SLE:
7043 if (Op1Max == Op0Min) // A <=s B -> A == B if min(A) == max(B)
7044 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7045 break;
7046 case ICmpInst::ICMP_UGE:
7047 if (Op1Min == Op0Max) // A >=u B -> A == B if max(A) == min(B)
7048 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7049 break;
7050 case ICmpInst::ICMP_ULE:
7051 if (Op1Max == Op0Min) // A <=u B -> A == B if min(A) == max(B)
7052 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7053 break;
7054 }
7055
7056 // Turn a signed comparison into an unsigned one if both operands are known to
7057 // have the same sign. Set samesign if possible (except for equality
7058 // predicates).
7059 if ((I.isSigned() || (I.isUnsigned() && !I.hasSameSign())) &&
7060 ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
7061 (Op0Known.One.isNegative() && Op1Known.One.isNegative()))) {
7062 I.setPredicate(I.getUnsignedPredicate());
7063 I.setSameSign();
7064 return &I;
7065 }
7066
7067 return nullptr;
7068}
7069
7070/// If one operand of an icmp is effectively a bool (value range of {0,1}),
7071/// then try to reduce patterns based on that limit.
7072Instruction *InstCombinerImpl::foldICmpUsingBoolRange(ICmpInst &I) {
7073 Value *X, *Y;
7074 CmpPredicate Pred;
7075
7076 // X must be 0 and bool must be true for "ULT":
7077 // X <u (zext i1 Y) --> (X == 0) & Y
7078 if (match(&I, m_c_ICmp(Pred, m_Value(X), m_OneUse(m_ZExt(m_Value(Y))))) &&
7079 Y->getType()->isIntOrIntVectorTy(1) && Pred == ICmpInst::ICMP_ULT)
7080 return BinaryOperator::CreateAnd(Builder.CreateIsNull(X), Y);
7081
7082 // X must be 0 or bool must be true for "ULE":
7083 // X <=u (sext i1 Y) --> (X == 0) | Y
7084 if (match(&I, m_c_ICmp(Pred, m_Value(X), m_OneUse(m_SExt(m_Value(Y))))) &&
7085 Y->getType()->isIntOrIntVectorTy(1) && Pred == ICmpInst::ICMP_ULE)
7086 return BinaryOperator::CreateOr(Builder.CreateIsNull(X), Y);
7087
7088 // icmp eq/ne X, (zext/sext (icmp eq/ne X, C))
7089 CmpPredicate Pred1, Pred2;
7090 const APInt *C;
7091 Instruction *ExtI;
7092 if (match(&I, m_c_ICmp(Pred1, m_Value(X),
7093 m_CombineAnd(m_Instruction(ExtI),
7094 m_ZExtOrSExt(m_ICmp(Pred2, m_Deferred(X),
7095 m_APInt(C)))))) &&
7096 ICmpInst::isEquality(Pred1) && ICmpInst::isEquality(Pred2)) {
7097 bool IsSExt = ExtI->getOpcode() == Instruction::SExt;
7098 bool HasOneUse = ExtI->hasOneUse() && ExtI->getOperand(0)->hasOneUse();
7099 auto CreateRangeCheck = [&] {
7100 Value *CmpV1 =
7101 Builder.CreateICmp(Pred1, X, Constant::getNullValue(X->getType()));
7102 Value *CmpV2 = Builder.CreateICmp(
7103 Pred1, X, ConstantInt::getSigned(X->getType(), IsSExt ? -1 : 1));
7104 return BinaryOperator::Create(
7105 Pred1 == ICmpInst::ICMP_EQ ? Instruction::Or : Instruction::And,
7106 CmpV1, CmpV2);
7107 };
7108 if (C->isZero()) {
7109 if (Pred2 == ICmpInst::ICMP_EQ) {
7110 // icmp eq X, (zext/sext (icmp eq X, 0)) --> false
7111 // icmp ne X, (zext/sext (icmp eq X, 0)) --> true
7112 return replaceInstUsesWith(
7113 I, ConstantInt::getBool(I.getType(), Pred1 == ICmpInst::ICMP_NE));
7114 } else if (!IsSExt || HasOneUse) {
7115 // icmp eq X, (zext (icmp ne X, 0)) --> X == 0 || X == 1
7116 // icmp ne X, (zext (icmp ne X, 0)) --> X != 0 && X != 1
7117 // icmp eq X, (sext (icmp ne X, 0)) --> X == 0 || X == -1
7118 // icmp ne X, (sext (icmp ne X, 0)) --> X != 0 && X != -1
7119 return CreateRangeCheck();
7120 }
7121 } else if (IsSExt ? C->isAllOnes() : C->isOne()) {
7122 if (Pred2 == ICmpInst::ICMP_NE) {
7123 // icmp eq X, (zext (icmp ne X, 1)) --> false
7124 // icmp ne X, (zext (icmp ne X, 1)) --> true
7125 // icmp eq X, (sext (icmp ne X, -1)) --> false
7126 // icmp ne X, (sext (icmp ne X, -1)) --> true
7127 return replaceInstUsesWith(
7128 I, ConstantInt::getBool(I.getType(), Pred1 == ICmpInst::ICMP_NE));
7129 } else if (!IsSExt || HasOneUse) {
7130 // icmp eq X, (zext (icmp eq X, 1)) --> X == 0 || X == 1
7131 // icmp ne X, (zext (icmp eq X, 1)) --> X != 0 && X != 1
7132 // icmp eq X, (sext (icmp eq X, -1)) --> X == 0 || X == -1
7133 // icmp ne X, (sext (icmp eq X, -1)) --> X != 0 && X != -1
7134 return CreateRangeCheck();
7135 }
7136 } else {
7137 // when C != 0 && C != 1:
7138 // icmp eq X, (zext (icmp eq X, C)) --> icmp eq X, 0
7139 // icmp eq X, (zext (icmp ne X, C)) --> icmp eq X, 1
7140 // icmp ne X, (zext (icmp eq X, C)) --> icmp ne X, 0
7141 // icmp ne X, (zext (icmp ne X, C)) --> icmp ne X, 1
7142 // when C != 0 && C != -1:
7143 // icmp eq X, (sext (icmp eq X, C)) --> icmp eq X, 0
7144 // icmp eq X, (sext (icmp ne X, C)) --> icmp eq X, -1
7145 // icmp ne X, (sext (icmp eq X, C)) --> icmp ne X, 0
7146 // icmp ne X, (sext (icmp ne X, C)) --> icmp ne X, -1
7147 return ICmpInst::Create(
7148 Instruction::ICmp, Pred1, X,
7149 ConstantInt::getSigned(X->getType(), Pred2 == ICmpInst::ICMP_NE
7150 ? (IsSExt ? -1 : 1)
7151 : 0));
7152 }
7153 }
7154
7155 return nullptr;
7156}
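// [Editor's sketch, not part of the LLVM source] Checking the first
// bool-range rewrite above on representative inputs: X u< zext(i1 Y) can
// only hold when Y is true and X is zero. The helper name is ours.
constexpr bool ultZextFoldHolds(unsigned X, bool Y) {
  return (X < (Y ? 1u : 0u)) == (X == 0u && Y);
}
static_assert(ultZextFoldHolds(0u, true) && ultZextFoldHolds(0u, false) &&
                  ultZextFoldHolds(1u, true) && ultZextFoldHolds(2u, false),
              "X u< zext(i1 Y) is equivalent to (X == 0) & Y");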
7157
7158/// If we have an icmp le or icmp ge instruction with a constant operand, turn
7159/// it into the appropriate icmp lt or icmp gt instruction. This transform
7160/// allows them to be folded in visitICmpInst.
7161static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
7162 ICmpInst::Predicate Pred = I.getPredicate();
7163 if (ICmpInst::isEquality(Pred) || !ICmpInst::isIntPredicate(Pred) ||
7164 InstCombiner::isCanonicalPredicate(Pred))
7165 return nullptr;
7166
7167 Value *Op0 = I.getOperand(0);
7168 Value *Op1 = I.getOperand(1);
7169 auto *Op1C = dyn_cast<Constant>(Op1);
7170 if (!Op1C)
7171 return nullptr;
7172
7173 auto FlippedStrictness = getFlippedStrictnessPredicateAndConstant(Pred, Op1C);
7174 if (!FlippedStrictness)
7175 return nullptr;
7176
7177 return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);
7178}
7179
7180/// If we have a comparison with a non-canonical predicate, if we can update
7181/// all the users, invert the predicate and adjust all the users.
7182CmpInst *InstCombinerImpl::canonicalizeICmpPredicate(CmpInst &I) {
7183 // Is the predicate already canonical?
7184 CmpInst::Predicate Pred = I.getPredicate();
7185 if (InstCombiner::isCanonicalPredicate(Pred))
7186 return nullptr;
7187
7188 // Can all users be adjusted to predicate inversion?
7189 if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
7190 return nullptr;
7191
7192 // Ok, we can canonicalize comparison!
7193 // Let's first invert the comparison's predicate.
7194 I.setPredicate(CmpInst::getInversePredicate(Pred));
7195 I.setName(I.getName() + ".not");
7196
7197 // And, adapt users.
7198 freelyInvertAllUsersOf(&I);
7199
7200 return &I;
7201}
7202
7203/// Integer compare with boolean values can always be turned into bitwise ops.
7204static Instruction *canonicalizeICmpBool(ICmpInst &I,
7205 InstCombiner::BuilderTy &Builder) {
7206 Value *A = I.getOperand(0), *B = I.getOperand(1);
7207 assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");
7208
7209 // A boolean compared to true/false can be simplified to Op0/true/false in
7210 // 14 out of the 20 (10 predicates * 2 constants) possible combinations.
7211 // Cases not handled by InstSimplify are always 'not' of Op0.
7212 if (match(B, m_Zero())) {
7213 switch (I.getPredicate()) {
7214 case CmpInst::ICMP_EQ: // A == 0 -> !A
7215 case CmpInst::ICMP_ULE: // A <=u 0 -> !A
7216 case CmpInst::ICMP_SGE: // A >=s 0 -> !A
7217 return BinaryOperator::CreateNot(A);
7218 default:
7219 llvm_unreachable("ICmp i1 X, C not simplified as expected.");
7220 }
7221 } else if (match(B, m_One())) {
7222 switch (I.getPredicate()) {
7223 case CmpInst::ICMP_NE: // A != 1 -> !A
7224 case CmpInst::ICMP_ULT: // A <u 1 -> !A
7225 case CmpInst::ICMP_SGT: // A >s -1 -> !A
7226 return BinaryOperator::CreateNot(A);
7227 default:
7228 llvm_unreachable("ICmp i1 X, C not simplified as expected.");
7229 }
7230 }
7231
7232 switch (I.getPredicate()) {
7233 default:
7234 llvm_unreachable("Invalid icmp instruction!");
7235 case ICmpInst::ICMP_EQ:
7236 // icmp eq i1 A, B -> ~(A ^ B)
7237 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
7238
7239 case ICmpInst::ICMP_NE:
7240 // icmp ne i1 A, B -> A ^ B
7241 return BinaryOperator::CreateXor(A, B);
7242
7243 case ICmpInst::ICMP_UGT:
7244 // icmp ugt -> icmp ult
7245 std::swap(A, B);
7246 [[fallthrough]];
7247 case ICmpInst::ICMP_ULT:
7248 // icmp ult i1 A, B -> ~A & B
7249 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
7250
7251 case ICmpInst::ICMP_SGT:
7252 // icmp sgt -> icmp slt
7253 std::swap(A, B);
7254 [[fallthrough]];
7255 case ICmpInst::ICMP_SLT:
7256 // icmp slt i1 A, B -> A & ~B
7257 return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);
7258
7259 case ICmpInst::ICMP_UGE:
7260 // icmp uge -> icmp ule
7261 std::swap(A, B);
7262 [[fallthrough]];
7263 case ICmpInst::ICMP_ULE:
7264 // icmp ule i1 A, B -> ~A | B
7265 return BinaryOperator::CreateOr(Builder.CreateNot(A), B);
7266
7267 case ICmpInst::ICMP_SGE:
7268 // icmp sge -> icmp sle
7269 std::swap(A, B);
7270 [[fallthrough]];
7271 case ICmpInst::ICMP_SLE:
7272 // icmp sle i1 A, B -> A | ~B
7273 return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
7274 }
7275}
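// [Editor's sketch, not part of the LLVM source] An exhaustive check of one
// rewrite above: "icmp ult i1 A, B" (A u< B holds only for A == 0, B == 1)
// agrees with "~A & B" on all four inputs. The helper name is ours.
constexpr bool boolUltMatchesAndNot(bool A, bool B) {
  return ((A ? 1 : 0) < (B ? 1 : 0)) == (!A && B);
}
static_assert(boolUltMatchesAndNot(false, false) &&
                  boolUltMatchesAndNot(false, true) &&
                  boolUltMatchesAndNot(true, false) &&
                  boolUltMatchesAndNot(true, true),
              "icmp ult i1 A, B computes ~A & B");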
7276
7277// Transform pattern like:
7278// (1 << Y) u<= X or ~(-1 << Y) u< X or ((1 << Y)+(-1)) u< X
7279// (1 << Y) u> X or ~(-1 << Y) u>= X or ((1 << Y)+(-1)) u>= X
7280// Into:
7281// (X l>> Y) != 0
7282// (X l>> Y) == 0
7283static Instruction *foldICmpWithHighBitMask(ICmpInst &Cmp,
7284 InstCombiner::BuilderTy &Builder) {
7285 CmpPredicate Pred, NewPred;
7286 Value *X, *Y;
7287 if (match(&Cmp,
7288 m_c_ICmp(Pred, m_OneUse(m_Shl(m_One(), m_Value(Y))), m_Value(X)))) {
7289 switch (Pred) {
7290 case ICmpInst::ICMP_ULE:
7291 NewPred = ICmpInst::ICMP_NE;
7292 break;
7293 case ICmpInst::ICMP_UGT:
7294 NewPred = ICmpInst::ICMP_EQ;
7295 break;
7296 default:
7297 return nullptr;
7298 }
7299 } else if (match(&Cmp, m_c_ICmp(Pred,
7300 m_OneUse(m_CombineOr(
7301 m_Not(m_Shl(m_AllOnes(), m_Value(Y))),
7302 m_Add(m_Shl(m_One(), m_Value(Y)),
7303 m_AllOnes()))),
7304 m_Value(X)))) {
7305 // The variant with 'add' is not canonical (the variant with 'not' is);
7306 // we only get it here because it has extra uses and can't be canonicalized.
7307
7308 switch (Pred) {
7309 case ICmpInst::ICMP_ULT:
7310 NewPred = ICmpInst::ICMP_NE;
7311 break;
7312 case ICmpInst::ICMP_UGE:
7313 NewPred = ICmpInst::ICMP_EQ;
7314 break;
7315 default:
7316 return nullptr;
7317 }
7318 } else
7319 return nullptr;
7320
7321 Value *NewX = Builder.CreateLShr(X, Y, X->getName() + ".highbits");
7322 Constant *Zero = Constant::getNullValue(NewX->getType());
7323 return CmpInst::Create(Instruction::ICmp, NewPred, NewX, Zero);
7324}
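// [Editor's sketch, not part of the LLVM source] Spot-checking the rewrite
// above: (1 << Y) u<= X says some bit of X at position Y or higher is set,
// which is exactly (X l>> Y) != 0. The helper name is ours.
constexpr bool highBitMaskFoldHolds(unsigned X, unsigned Y) {
  return ((1u << Y) <= X) == ((X >> Y) != 0u);
}
static_assert(highBitMaskFoldHolds(8u, 3u) && highBitMaskFoldHolds(7u, 3u) &&
                  highBitMaskFoldHolds(9u, 2u) && highBitMaskFoldHolds(0u, 0u),
              "(1 << Y) u<= X is equivalent to (X l>> Y) != 0");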
7325
7326static Instruction *foldVectorCmp(CmpInst &Cmp,
7327 InstCombiner::BuilderTy &Builder) {
7328 const CmpInst::Predicate Pred = Cmp.getPredicate();
7329 Value *LHS = Cmp.getOperand(0), *RHS = Cmp.getOperand(1);
7330 Value *V1, *V2;
7331
7332 auto createCmpReverse = [&](CmpInst::Predicate Pred, Value *X, Value *Y) {
7333 Value *V = Builder.CreateCmp(Pred, X, Y, Cmp.getName());
7334 if (auto *I = dyn_cast<Instruction>(V))
7335 I->copyIRFlags(&Cmp);
7336 Module *M = Cmp.getModule();
7337 Function *F = Intrinsic::getOrInsertDeclaration(
7338 M, Intrinsic::vector_reverse, V->getType());
7339 return CallInst::Create(F, V);
7340 };
7341
7342 if (match(LHS, m_VecReverse(m_Value(V1)))) {
7343 // cmp Pred, rev(V1), rev(V2) --> rev(cmp Pred, V1, V2)
7344 if (match(RHS, m_VecReverse(m_Value(V2))) &&
7345 (LHS->hasOneUse() || RHS->hasOneUse()))
7346 return createCmpReverse(Pred, V1, V2);
7347
7348 // cmp Pred, rev(V1), RHSSplat --> rev(cmp Pred, V1, RHSSplat)
7349 if (LHS->hasOneUse() && isSplatValue(RHS))
7350 return createCmpReverse(Pred, V1, RHS);
7351 }
7352 // cmp Pred, LHSSplat, rev(V2) --> rev(cmp Pred, LHSSplat, V2)
7353 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
7354 return createCmpReverse(Pred, LHS, V2);
7355
7356 ArrayRef<int> M;
7357 if (!match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(M))))
7358 return nullptr;
7359
7360 // If both arguments of the cmp are shuffles that use the same mask and
7361 // shuffle within a single vector, move the shuffle after the cmp:
7362 // cmp (shuffle V1, M), (shuffle V2, M) --> shuffle (cmp V1, V2), M
7363 Type *V1Ty = V1->getType();
7364 if (match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(M))) &&
7365 V1Ty == V2->getType() && (LHS->hasOneUse() || RHS->hasOneUse())) {
7366 Value *NewCmp = Builder.CreateCmp(Pred, V1, V2);
7367 return new ShuffleVectorInst(NewCmp, M);
7368 }
7369
7370 // Try to canonicalize compare with splatted operand and splat constant.
7371 // TODO: We could generalize this for more than splats. See/use the code in
7372 // InstCombiner::foldVectorBinop().
7373 Constant *C;
7374 if (!LHS->hasOneUse() || !match(RHS, m_Constant(C)))
7375 return nullptr;
7376
7377 // Length-changing splats are ok, so adjust the constants as needed:
7378 // cmp (shuffle V1, M), C --> shuffle (cmp V1, C'), M
7379 Constant *ScalarC = C->getSplatValue(/* AllowPoison */ true);
7380 int MaskSplatIndex;
7381 if (ScalarC && match(M, m_SplatOrPoisonMask(MaskSplatIndex))) {
7382 // We allow poison in matching, but this transform removes it for safety.
7383 // Demanded elements analysis should be able to recover some/all of that.
7384 C = ConstantVector::getSplat(cast<VectorType>(V1Ty)->getElementCount(),
7385 ScalarC);
7386 SmallVector<int, 8> NewM(M.size(), MaskSplatIndex);
7387 Value *NewCmp = Builder.CreateCmp(Pred, V1, C);
7388 return new ShuffleVectorInst(NewCmp, NewM);
7389 }
7390
7391 return nullptr;
7392}
7393
7394// extract(uadd.with.overflow(A, B), 0) ult A
7395// -> extract(uadd.with.overflow(A, B), 1)
7396static Instruction *foldICmpOfUAddOv(ICmpInst &I) {
7397 CmpInst::Predicate Pred = I.getPredicate();
7398 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
7399
7400 Value *UAddOv;
7401 Value *A, *B;
7402 auto UAddOvResultPat = m_ExtractValue<0>(
7403 m_Intrinsic<Intrinsic::uadd_with_overflow>(m_Value(A), m_Value(B)));
7404 if (match(Op0, UAddOvResultPat) &&
7405 ((Pred == ICmpInst::ICMP_ULT && (Op1 == A || Op1 == B)) ||
7406 (Pred == ICmpInst::ICMP_EQ && match(Op1, m_ZeroInt()) &&
7407 (match(A, m_One()) || match(B, m_One()))) ||
7408 (Pred == ICmpInst::ICMP_NE && match(Op1, m_AllOnes()) &&
7409 (match(A, m_AllOnes()) || match(B, m_AllOnes())))))
7410 // extract(uadd.with.overflow(A, B), 0) < A
7411 // extract(uadd.with.overflow(A, 1), 0) == 0
7412 // extract(uadd.with.overflow(A, -1), 0) != -1
7413 UAddOv = cast<ExtractValueInst>(Op0)->getAggregateOperand();
7414 else if (match(Op1, UAddOvResultPat) && Pred == ICmpInst::ICMP_UGT &&
7415 (Op0 == A || Op0 == B))
7416 // A > extract(uadd.with.overflow(A, B), 0)
7417 UAddOv = cast<ExtractValueInst>(Op1)->getAggregateOperand();
7418 else
7419 return nullptr;
7420
7421 return ExtractValueInst::Create(UAddOv, 1);
7422}
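// [Editor's sketch, not part of the LLVM source] The uadd.with.overflow
// pattern matched above, in plain arithmetic: an unsigned add wraps exactly
// when the (truncated) sum is smaller than an operand. The helper name is
// ours.
constexpr bool uaddOverflows(unsigned A, unsigned B) { return A + B < A; }
static_assert(uaddOverflows(0xFFFFFFFFu, 1u) && !uaddOverflows(0xFFFFFFFEu, 1u),
              "(A + B) u< A is exactly the uadd.with.overflow carry bit");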
7423
7424static Instruction *foldICmpInvariantGroup(ICmpInst &I) {
7425 if (!I.getOperand(0)->getType()->isPointerTy() ||
7426 NullPointerIsDefined(
7427 I.getParent()->getParent(),
7428 I.getOperand(0)->getType()->getPointerAddressSpace())) {
7429 return nullptr;
7430 }
7431 Instruction *Op;
7432 if (match(I.getOperand(0), m_Instruction(Op)) &&
7433 match(I.getOperand(1), m_Zero()) &&
7434 Op->isLaunderOrStripInvariantGroup()) {
7435 return ICmpInst::Create(Instruction::ICmp, I.getPredicate(),
7436 Op->getOperand(0), I.getOperand(1));
7437 }
7438 return nullptr;
7439}
7440
7441/// This function folds patterns produced by lowering of reduce idioms, such as
7442/// llvm.vector.reduce.and, which are lowered into instruction chains. This code
7443/// attempts to generate scalar comparisons instead of vector comparisons
7444/// when possible.
7445static Instruction *foldReductionIdiom(ICmpInst &I,
7446 InstCombiner::BuilderTy &Builder,
7447 const DataLayout &DL) {
7448 if (I.getType()->isVectorTy())
7449 return nullptr;
7450 CmpPredicate OuterPred, InnerPred;
7451 Value *LHS, *RHS;
7452
7453 // Match lowering of @llvm.vector.reduce.and. Turn
7454 /// %vec_ne = icmp ne <8 x i8> %lhs, %rhs
7455 /// %scalar_ne = bitcast <8 x i1> %vec_ne to i8
7456 /// %res = icmp <pred> i8 %scalar_ne, 0
7457 ///
7458 /// into
7459 ///
7460 /// %lhs.scalar = bitcast <8 x i8> %lhs to i64
7461 /// %rhs.scalar = bitcast <8 x i8> %rhs to i64
7462 /// %res = icmp <pred> i64 %lhs.scalar, %rhs.scalar
7463 ///
7464 /// for <pred> in {ne, eq}.
7465 if (!match(&I, m_ICmp(OuterPred,
7466 m_OneUse(m_BitCast(m_OneUse(
7467 m_ICmp(InnerPred, m_Value(LHS), m_Value(RHS))))),
7468 m_Zero())))
7469 return nullptr;
7470 auto *LHSTy = dyn_cast<FixedVectorType>(LHS->getType());
7471 if (!LHSTy || !LHSTy->getElementType()->isIntegerTy())
7472 return nullptr;
7473 unsigned NumBits =
7474 LHSTy->getNumElements() * LHSTy->getElementType()->getIntegerBitWidth();
7475 // TODO: Relax this to "not wider than max legal integer type"?
7476 if (!DL.isLegalInteger(NumBits))
7477 return nullptr;
7478
7479 if (ICmpInst::isEquality(OuterPred) && InnerPred == ICmpInst::ICMP_NE) {
7480 auto *ScalarTy = Builder.getIntNTy(NumBits);
7481 LHS = Builder.CreateBitCast(LHS, ScalarTy, LHS->getName() + ".scalar");
7482 RHS = Builder.CreateBitCast(RHS, ScalarTy, RHS->getName() + ".scalar");
7483 return ICmpInst::Create(Instruction::ICmp, OuterPred, LHS, RHS,
7484 I.getName());
7485 }
7486
7487 return nullptr;
7488}
7489
7490// This helper will be called with icmp operands in both orders.
7491Instruction *InstCombinerImpl::foldICmpCommutative(CmpPredicate Pred,
7492 Value *Op0, Value *Op1,
7493 ICmpInst &CxtI) {
7494 // Try to optimize 'icmp GEP, P' or 'icmp P, GEP'.
7495 if (auto *GEP = dyn_cast<GEPOperator>(Op0))
7496 if (Instruction *NI = foldGEPICmp(GEP, Op1, Pred, CxtI))
7497 return NI;
7498
7499 if (auto *SI = dyn_cast<SelectInst>(Op0))
7500 if (Instruction *NI = foldSelectICmp(Pred, SI, Op1, CxtI))
7501 return NI;
7502
7503 if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op0)) {
7504 if (Instruction *Res = foldICmpWithMinMax(CxtI, MinMax, Op1, Pred))
7505 return Res;
7506
7507 if (Instruction *Res = foldICmpWithClamp(CxtI, Op1, MinMax))
7508 return Res;
7509 }
7510
7511 {
7512 Value *X;
7513 const APInt *C;
7514 // icmp X+Cst, X
7515 if (match(Op0, m_Add(m_Value(X), m_APInt(C))) && Op1 == X)
7516 return foldICmpAddOpConst(X, *C, Pred);
7517 }
7518
7519 // abs(X) >= X --> true
7520 // abs(X) u<= X --> true
7521 // abs(X) < X --> false
7522 // abs(X) u> X --> false
7523 // abs(X) u>= X --> IsIntMinPoison ? `X > -1` : `X u<= INTMIN`
7524 // abs(X) <= X --> IsIntMinPoison ? `X > -1` : `X u<= INTMIN`
7525 // abs(X) == X --> IsIntMinPoison ? `X > -1` : `X u<= INTMIN`
7526 // abs(X) u< X --> IsIntMinPoison ? `X < 0` : `X > INTMIN`
7527 // abs(X) > X --> IsIntMinPoison ? `X < 0` : `X > INTMIN`
7528 // abs(X) != X --> IsIntMinPoison ? `X < 0` : `X > INTMIN`
7529 {
7530 Value *X;
7531 Constant *C;
7532 if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X), m_Constant(C))) &&
7533 match(Op1, m_Specific(X))) {
7534 Value *NullValue = Constant::getNullValue(X->getType());
7535 Value *AllOnesValue = Constant::getAllOnesValue(X->getType());
7536 const APInt SMin =
7537 APInt::getSignedMinValue(X->getType()->getScalarSizeInBits());
7538 bool IsIntMinPoison = C->isAllOnesValue();
7539 switch (Pred) {
7540 case CmpInst::ICMP_ULE:
7541 case CmpInst::ICMP_SGE:
7542 return replaceInstUsesWith(CxtI, ConstantInt::getTrue(CxtI.getType()));
7543 case CmpInst::ICMP_UGT:
7544 case CmpInst::ICMP_SLT:
7545 return replaceInstUsesWith(CxtI, ConstantInt::getFalse(CxtI.getType()));
7546 case CmpInst::ICMP_UGE:
7547 case CmpInst::ICMP_SLE:
7548 case CmpInst::ICMP_EQ: {
7549 return replaceInstUsesWith(
7550 CxtI, IsIntMinPoison
7551 ? Builder.CreateICmpSGT(X, AllOnesValue)
7552 : Builder.CreateICmpULT(
7553 X, ConstantInt::get(X->getType(), SMin + 1)));
7554 }
7555 case CmpInst::ICMP_ULT:
7556 case CmpInst::ICMP_SGT:
7557 case CmpInst::ICMP_NE: {
7558 return replaceInstUsesWith(
7559 CxtI, IsIntMinPoison
7560 ? Builder.CreateICmpSLT(X, NullValue)
7561 : Builder.CreateICmpUGT(
7562 X, ConstantInt::get(X->getType(), SMin)));
7563 }
7564 default:
7565 llvm_unreachable("Invalid predicate!");
7566 }
7567 }
7568 }
7569
7570 const SimplifyQuery Q = SQ.getWithInstruction(&CxtI);
7571 if (Value *V = foldICmpWithLowBitMaskedVal(Pred, Op0, Op1, Q, *this))
7572 return replaceInstUsesWith(CxtI, V);
7573
7574 // Folding (X / Y) pred X => X swap(pred) 0 for constant Y other than 0 or 1
7575 auto CheckUGT1 = [](const APInt &Divisor) { return Divisor.ugt(1); };
7576 {
7577 if (match(Op0, m_UDiv(m_Specific(Op1), m_CheckedInt(CheckUGT1)))) {
7578 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7579 Constant::getNullValue(Op1->getType()));
7580 }
7581
7582 if (!ICmpInst::isUnsigned(Pred) &&
7583 match(Op0, m_SDiv(m_Specific(Op1), m_CheckedInt(CheckUGT1)))) {
7584 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7585 Constant::getNullValue(Op1->getType()));
7586 }
7587 }
7588
7589 // Another case of this fold is (X >> Y) pred X => X swap(pred) 0 if Y != 0
7590 auto CheckNE0 = [](const APInt &Shift) { return !Shift.isZero(); };
7591 {
7592 if (match(Op0, m_LShr(m_Specific(Op1), m_CheckedInt(CheckNE0)))) {
7593 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7594 Constant::getNullValue(Op1->getType()));
7595 }
7596
7597 if ((Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SGE) &&
7598 match(Op0, m_AShr(m_Specific(Op1), m_CheckedInt(CheckNE0)))) {
7599 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7600 Constant::getNullValue(Op1->getType()));
7601 }
7602 }
7603
7604 return nullptr;
7605}
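// [Editor's sketch, not part of the LLVM source] Spot-checking the division
// fold above with the constant 3: for unsigned X, (X / 3) u< X holds exactly
// when X u> 0, i.e. "X swap(pred) 0". The helper name is ours.
constexpr bool udivFoldHolds(unsigned X) { return (X / 3u < X) == (X > 0u); }
static_assert(udivFoldHolds(0u) && udivFoldHolds(1u) && udivFoldHolds(3u) &&
                  udivFoldHolds(100u),
              "(X u/ 3) u< X is equivalent to X u> 0");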
7606
7607Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
7608 bool Changed = false;
7609 const SimplifyQuery Q = SQ.getWithInstruction(&I);
7610 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
7611 unsigned Op0Cplxity = getComplexity(Op0);
7612 unsigned Op1Cplxity = getComplexity(Op1);
7613
7614 /// Orders the operands of the compare so that they are listed from most
7615 /// complex to least complex. This puts binary operators before unary
7616 /// operators, which in turn come before constants.
7617 if (Op0Cplxity < Op1Cplxity) {
7618 I.swapOperands();
7619 std::swap(Op0, Op1);
7620 Changed = true;
7621 }
7622
7623 if (Value *V = simplifyICmpInst(I.getCmpPredicate(), Op0, Op1, Q))
7624 return replaceInstUsesWith(I, V);
7625
7626 // Comparing -val or val with non-zero is the same as just comparing val
7627 // i.e., abs(val) != 0 -> val != 0
7628 if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
7629 Value *Cond, *SelectTrue, *SelectFalse;
7630 if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
7631 m_Value(SelectFalse)))) {
7632 if (Value *V = dyn_castNegVal(SelectTrue)) {
7633 if (V == SelectFalse)
7634 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
7635 } else if (Value *V = dyn_castNegVal(SelectFalse)) {
7636 if (V == SelectTrue)
7637 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
7638 }
7639 }
7640 }
7641
7643 return Res;
7644
7645 if (Op0->getType()->isIntOrIntVectorTy(1))
7646 if (Instruction *Res = canonicalizeICmpBool(I, Builder))
7647 return Res;
7648
7649 if (Instruction *Res = canonicalizeICmpPredicate(I))
7650 return Res;
7651
7652 if (Instruction *Res = foldICmpWithConstant(I))
7653 return Res;
7654
7655 if (Instruction *Res = foldICmpWithDominatingICmp(I))
7656 return Res;
7657
7658 if (Instruction *Res = foldICmpUsingBoolRange(I))
7659 return Res;
7660
7661 if (Instruction *Res = foldICmpUsingKnownBits(I))
7662 return Res;
7663
7664 if (Instruction *Res = foldICmpTruncWithTruncOrExt(I, Q))
7665 return Res;
7666
7668 return Res;
7669
7670 // Test if the ICmpInst instruction is used exclusively by a select as
7671 // part of a minimum or maximum operation. If so, refrain from doing
7672 // any other folding. This helps out other analyses which understand
7673 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
7674 // and CodeGen. And in this case, at least one of the comparison
7675 // operands has at least one user besides the compare (the select),
7676 // which would often largely negate the benefit of folding anyway.
7677 //
7678 // Do the same for the other patterns recognized by matchSelectPattern.
7679 if (I.hasOneUse())
7680 if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
7681 Value *A, *B;
7682 SelectPatternResult SPR = matchSelectPattern(SI, A, B);
7683 if (SPR.Flavor != SPF_UNKNOWN)
7684 return nullptr;
7685 }
7686
7687 // Do this after checking for min/max to prevent infinite looping.
7688 if (Instruction *Res = foldICmpWithZero(I))
7689 return Res;
7690
7691 // FIXME: We only do this after checking for min/max to prevent infinite
7692 // looping caused by a reverse canonicalization of these patterns for min/max.
7693 // FIXME: The organization of folds is a mess. These would naturally go into
7694 // canonicalizeCmpWithConstant(), but we can't move all of the above folds
7695 // down here after the min/max restriction.
7696 ICmpInst::Predicate Pred = I.getPredicate();
7697 const APInt *C;
7698 if (match(Op1, m_APInt(C))) {
7699 // For i32: x >u 2147483647 -> x <s 0 -> true if sign bit set
7700 if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
7701 Constant *Zero = Constant::getNullValue(Op0->getType());
7702 return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
7703 }
7704
7705 // For i32: x <u 2147483648 -> x >s -1 -> true if sign bit clear
7706 if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
7707 Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
7708 return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
7709 }
7710 }
7711
7712 // The folds in here may rely on wrapping flags and special constants, so
7713 // they can break up min/max idioms in some cases but not seemingly similar
7714 // patterns.
7715 // FIXME: It may be possible to enhance select folding to make this
7716 // unnecessary. It may also be moot if we canonicalize to min/max
7717 // intrinsics.
7718 if (Instruction *Res = foldICmpBinOp(I, Q))
7719 return Res;
7720
7721 if (Instruction *Res = foldICmpInstWithConstant(I))
7722 return Res;
7723
7724 // Try to match comparison as a sign bit test. Intentionally do this after
7725 // foldICmpInstWithConstant() to potentially let other folds to happen first.
7726 if (Instruction *New = foldSignBitTest(I))
7727 return New;
7728
7729 if (auto *PN = dyn_cast<PHINode>(Op0))
7730 if (Instruction *NV = foldOpIntoPhi(I, PN))
7731 return NV;
7732 if (auto *PN = dyn_cast<PHINode>(Op1))
7733 if (Instruction *NV = foldOpIntoPhi(I, PN))
7734 return NV;
7735
7736 if (Instruction *Res = foldICmpWithCastOp(I))
7737 return Res;
7738
7739 if (Instruction *Res = foldICmpCommutative(I.getCmpPredicate(), Op0, Op1, I))
7740 return Res;
7741 if (Instruction *Res =
7742 foldICmpCommutative(I.getSwappedCmpPredicate(), Op1, Op0, I))
7743 return Res;
7744
7745 if (I.isCommutative()) {
7746 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
7747 replaceOperand(I, 0, Pair->first);
7748 replaceOperand(I, 1, Pair->second);
7749 return &I;
7750 }
7751 }
7752
7753 // In case of a comparison with two select instructions having the same
7754 // condition, check whether one of the resulting branches can be simplified.
7755 // If so, just compare the other branch and select the appropriate result.
7756 // For example:
7757 // %tmp1 = select i1 %cmp, i32 %y, i32 %x
7758 // %tmp2 = select i1 %cmp, i32 %z, i32 %x
7759 // %cmp2 = icmp slt i32 %tmp2, %tmp1
7760 // The icmp will result false for the false value of selects and the result
7761 // will depend upon the comparison of true values of selects if %cmp is
7762 // true. Thus, transform this into:
7763 // %cmp = icmp slt i32 %y, %z
7764 // %sel = select i1 %cond, i1 %cmp, i1 false
7765 // Similar cases are handled by this transform as well.
7766 {
7767 Value *Cond, *A, *B, *C, *D;
7768 if (match(Op0, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
7769 match(Op1, m_Select(m_Specific(Cond), m_Value(C), m_Value(D))) &&
7770 (Op0->hasOneUse() || Op1->hasOneUse())) {
7771 // Check whether comparison of TrueValues can be simplified
7772 if (Value *Res = simplifyICmpInst(Pred, A, C, SQ)) {
7773 Value *NewICMP = Builder.CreateICmp(Pred, B, D);
7774 return SelectInst::Create(Cond, Res, NewICMP);
7775 }
7776 // Check whether comparison of FalseValues can be simplified
7777 if (Value *Res = simplifyICmpInst(Pred, B, D, SQ)) {
7778 Value *NewICMP = Builder.CreateICmp(Pred, A, C);
7779 return SelectInst::Create(Cond, NewICMP, Res);
7780 }
7781 }
7782 }
7783
7784 // icmp slt (sub nsw x, y), (add nsw x, y) --> icmp sgt y, 0
7785 // icmp ult (sub nuw x, y), (add nuw x, y) --> icmp ugt y, 0
7786 // icmp eq (sub nsw/nuw x, y), (add nsw/nuw x, y) --> icmp eq y, 0
7787 {
7788 Value *A, *B;
7789 CmpPredicate CmpPred;
7790 if (match(&I, m_c_ICmp(CmpPred, m_Sub(m_Value(A), m_Value(B)),
7791 m_c_Add(m_Deferred(A), m_Deferred(B))))) {
7792 auto *I0 = cast<OverflowingBinaryOperator>(Op0);
7793 auto *I1 = cast<OverflowingBinaryOperator>(Op1);
7794 bool I0NUW = I0->hasNoUnsignedWrap();
7795 bool I1NUW = I1->hasNoUnsignedWrap();
7796 bool I0NSW = I0->hasNoSignedWrap();
7797 bool I1NSW = I1->hasNoSignedWrap();
7798 if ((ICmpInst::isUnsigned(Pred) && I0NUW && I1NUW) ||
7799 (ICmpInst::isSigned(Pred) && I0NSW && I1NSW) ||
7800 (ICmpInst::isEquality(Pred) &&
7801 ((I0NUW || I0NSW) && (I1NUW || I1NSW)))) {
7802 return new ICmpInst(CmpPredicate::getSwapped(CmpPred), B,
7803 ConstantInt::get(Op0->getType(), 0));
7804 }
7805 }
7806 }
7807
7808 // Try to optimize equality comparisons against alloca-based pointers.
7809 if (Op0->getType()->isPointerTy() && I.isEquality()) {
7810 assert(Op1->getType()->isPointerTy() &&
7811 "Comparing pointer with non-pointer?");
7812 if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op0)))
7813 if (foldAllocaCmp(Alloca))
7814 return nullptr;
7815 if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op1)))
7816 if (foldAllocaCmp(Alloca))
7817 return nullptr;
7818 }
7819
7820 if (Instruction *Res = foldICmpBitCast(I))
7821 return Res;
7822
7823 // TODO: Hoist this above the min/max bailout.
7825 return R;
7826
7827 {
7828 Value *X, *Y;
7829 // Transform (X & ~Y) == 0 --> (X & Y) != 0
7830 // and (X & ~Y) != 0 --> (X & Y) == 0
7831 // if X is a power of 2: its single set bit is either in Y or not.
7832 if (match(Op0, m_And(m_Value(X), m_Not(m_Value(Y)))) &&
7833 match(Op1, m_Zero()) && isKnownToBeAPowerOfTwo(X, false, &I) &&
7834 I.isEquality())
7835 return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(X, Y),
7836 Op1);
7837
7838 // Op0 pred Op1 -> ~Op1 pred ~Op0, if this allows us to drop an instruction.
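// For example, when both nots are free to remove:
// icmp slt (xor X, -1), (xor Y, -1) --> icmp sgt X, Y
// since bitwise-not reverses both the signed and the unsigned order.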
7839 if (Op0->getType()->isIntOrIntVectorTy()) {
7840 bool ConsumesOp0, ConsumesOp1;
7841 if (isFreeToInvert(Op0, Op0->hasOneUse(), ConsumesOp0) &&
7842 isFreeToInvert(Op1, Op1->hasOneUse(), ConsumesOp1) &&
7843 (ConsumesOp0 || ConsumesOp1)) {
7844 Value *InvOp0 = getFreelyInverted(Op0, Op0->hasOneUse(), &Builder);
7845 Value *InvOp1 = getFreelyInverted(Op1, Op1->hasOneUse(), &Builder);
7846 assert(InvOp0 && InvOp1 &&
7847 "Mismatch between isFreeToInvert and getFreelyInverted");
7848 return new ICmpInst(I.getSwappedPredicate(), InvOp0, InvOp1);
7849 }
7850 }
7851
7852 Instruction *AddI = nullptr;
7853 if (match(&I, m_UAddWithOverflow(m_Value(X), m_Value(Y),
7854 m_Instruction(AddI))) &&
7855 isa<IntegerType>(X->getType())) {
7856 Value *Result;
7857 Constant *Overflow;
7858 // m_UAddWithOverflow can match patterns that do not include an explicit
7859 // "add" instruction, so check the opcode of the matched op.
7860 if (AddI->getOpcode() == Instruction::Add &&
7861 OptimizeOverflowCheck(Instruction::Add, /*Signed*/ false, X, Y, *AddI,
7862 Result, Overflow)) {
7863 replaceInstUsesWith(*AddI, Result);
7864 eraseInstFromFunction(*AddI);
7865 return replaceInstUsesWith(I, Overflow);
7866 }
7867 }
7868
7869 // (zext X) * (zext Y) --> llvm.umul.with.overflow.
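// For example, with i32 X and Y zero-extended to i64:
// %m = mul nuw i64 (zext %x), (zext %y)
// icmp ugt i64 %m, 4294967295
// tests exactly whether the 32-bit product X * Y overflows.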
7870 if (match(Op0, m_NUWMul(m_ZExt(m_Value(X)), m_ZExt(m_Value(Y)))) &&
7871 match(Op1, m_APInt(C))) {
7872 if (Instruction *R = processUMulZExtIdiom(I, Op0, C, *this))
7873 return R;
7874 }
7875
7876 // Signbit test folds
7877 // Fold (X u>> BitWidth - 1 Pred ZExt(i1)) --> X s< 0 Pred i1
7878 // Fold (X s>> BitWidth - 1 Pred SExt(i1)) --> X s< 0 Pred i1
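// For example, with i32 X and i1 Y:
// icmp eq (lshr X, 31), (zext Y) --> icmp eq (icmp slt X, 0), Y
// because 'lshr X, 31' extracts exactly the sign bit of X.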
7879 Instruction *ExtI;
7880 if ((I.isUnsigned() || I.isEquality()) &&
7881 match(Op1,
7882 m_CombineAnd(m_Instruction(ExtI), m_ZExtOrSExt(m_Value(Y)))) &&
7883 Y->getType()->getScalarSizeInBits() == 1 &&
7884 (Op0->hasOneUse() || Op1->hasOneUse())) {
7885 unsigned OpWidth = Op0->getType()->getScalarSizeInBits();
7886 Instruction *ShiftI;
7887 if (match(Op0, m_CombineAnd(m_Instruction(ShiftI),
7888 m_Shr(m_Value(X), m_SpecificIntAllowPoison(
7889 OpWidth - 1))))) {
7890 unsigned ExtOpc = ExtI->getOpcode();
7891 unsigned ShiftOpc = ShiftI->getOpcode();
7892 if ((ExtOpc == Instruction::ZExt && ShiftOpc == Instruction::LShr) ||
7893 (ExtOpc == Instruction::SExt && ShiftOpc == Instruction::AShr)) {
7894 Value *SLTZero =
7895 Builder.CreateICmpSLT(X, Constant::getNullValue(X->getType()));
7896 Value *Cmp = Builder.CreateICmp(Pred, SLTZero, Y, I.getName());
7897 return replaceInstUsesWith(I, Cmp);
7898 }
7899 }
7900 }
7901 }
7902
7903 if (Instruction *Res = foldICmpEquality(I))
7904 return Res;
7905
7906 if (Instruction *Res = foldICmpPow2Test(I, Builder))
7907 return Res;
7908
7909 if (Instruction *Res = foldICmpOfUAddOv(I))
7910 return Res;
7911
7912 // The 'cmpxchg' instruction returns an aggregate containing the old value and
7913 // an i1 which indicates whether or not we successfully did the swap.
7914 //
7915 // Replace comparisons between the old value and the expected value with the
7916 // indicator that 'cmpxchg' returns.
7917 //
7918 // N.B. This transform is only valid when the 'cmpxchg' is not permitted to
7919 // spuriously fail. In those cases, the old value may equal the expected
7920 // value but it is possible for the swap to not occur.
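// For example:
// %pair = cmpxchg ptr %p, i32 %expected, i32 %new seq_cst seq_cst
// %old = extractvalue { i32, i1 } %pair, 0
// %eq = icmp eq i32 %old, %expected
// -->
// %eq = extractvalue { i32, i1 } %pair, 1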
7921 if (I.getPredicate() == ICmpInst::ICMP_EQ)
7922 if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
7923 if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
7924 if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
7925 !ACXI->isWeak())
7926 return ExtractValueInst::Create(ACXI, 1);
7927
7928 if (Instruction *Res = foldICmpInvariantGroup(I))
7929 return Res;
7930
7931 if (I.getType()->isVectorTy())
7932 if (Instruction *Res = foldVectorCmp(I, Builder))
7933 return Res;
7934
7935 if (Instruction *Res = foldReductionIdiom(I, Builder, DL))
7936 return Res;
7937
7938 if (Instruction *Res = foldICmpEqualityWithOffset(I, Builder, Q))
7939 return Res;
7940
7941 {
7942 Value *A;
7943 const APInt *C1, *C2;
7944 ICmpInst::Predicate Pred = I.getPredicate();
7945 if (ICmpInst::isEquality(Pred)) {
7946 // sext(a) & c1 == c2 --> a & c3 == trunc(c2)
7947 // sext(a) & c1 != c2 --> a & c3 != trunc(c2)
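// For example, with i8 a sign-extended to i32:
// (sext a) & 0x100 == 0 --> a & 0x80 == 0
// since every mask bit at or above bit 8 selects the replicated sign bit.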
7948 if (match(Op0, m_And(m_SExt(m_Value(A)), m_APInt(C1))) &&
7949 match(Op1, m_APInt(C2))) {
7950 Type *InputTy = A->getType();
7951 unsigned InputBitWidth = InputTy->getScalarSizeInBits();
7952 // c2 must be non-negative at the bitwidth of a.
7953 if (C2->getActiveBits() < InputBitWidth) {
7954 APInt TruncC1 = C1->trunc(InputBitWidth);
7955 // Any C1 bit at or above InputBitWidth masks the replicated sign bit of a.
7956 if (C1->uge(APInt::getOneBitSet(C1->getBitWidth(), InputBitWidth)))
7957 TruncC1.setBit(InputBitWidth - 1);
7958 Value *AndInst = Builder.CreateAnd(A, TruncC1);
7959 return new ICmpInst(
7960 Pred, AndInst,
7961 ConstantInt::get(InputTy, C2->trunc(InputBitWidth)));
7962 }
7963 }
7964 }
7965 }
7966
7967 return Changed ? &I : nullptr;
7968}
7969
7970/// Fold fcmp ([us]itofp x, cst) if possible.
7971 Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I,
7972 Instruction *LHSI,
7973 Constant *RHSC) {
7974 const APFloat *RHS;
7975 if (!match(RHSC, m_APFloat(RHS)))
7976 return nullptr;
7977
7978 // Get the width of the mantissa. We don't want to hack on conversions that
7979 // might lose information from the integer, e.g. "i64 -> float"
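// For example, 'float' has a 24-bit significand, so not every i32 (let
// alone i64) value survives an i32/i64 -> float round trip exactly.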
7980 int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
7981 if (MantissaWidth == -1)
7982 return nullptr; // Unknown.
7983
7984 Type *IntTy = LHSI->getOperand(0)->getType();
7985 unsigned IntWidth = IntTy->getScalarSizeInBits();
7986 bool LHSUnsigned = isa<UIToFPInst>(LHSI);
7987
7988 if (I.isEquality()) {
7989 FCmpInst::Predicate P = I.getPredicate();
7990 bool IsExact = false;
7991 APSInt RHSCvt(IntWidth, LHSUnsigned);
7992 RHS->convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);
7993
7994 // If the floating point constant isn't an integer value, the converted
7995 // integer can never compare equal to it, so eq/ne folds to a constant.
7996 if (!IsExact) {
7997 // TODO: Can never be -0.0 and other non-representable values
7998 APFloat RHSRoundInt(*RHS);
7999 RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
8000 if (*RHS != RHSRoundInt) {
8001 if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
8002 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8003
8004 assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
8005 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8006 }
8007 }
8008
8009 // TODO: If the constant is exactly representable, is it always OK to do
8010 // equality compares as integer?
8011 }
8012
8013 // Check to see that the input is converted from an integer type that is
8014 // small enough to preserve all bits. TODO: check here for "known" sign bits.
8015 // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
8016
8017 // Following test does NOT adjust IntWidth downwards for signed inputs,
8018 // because the most negative value still requires all the mantissa bits
8019 // to distinguish it from one less than that value.
8020 if ((int)IntWidth > MantissaWidth) {
8021 // Conversion would lose accuracy. Check if loss can impact comparison.
8022 int Exp = ilogb(*RHS);
8023 if (Exp == APFloat::IEK_Inf) {
8024 int MaxExponent = ilogb(APFloat::getLargest(RHS->getSemantics()));
8025 if (MaxExponent < (int)IntWidth - !LHSUnsigned)
8026 // Conversion could create infinity.
8027 return nullptr;
8028 } else {
8029 // Note that if RHS is zero or NaN, then Exp is negative
8030 // and first condition is trivially false.
8031 if (MantissaWidth <= Exp && Exp <= (int)IntWidth - !LHSUnsigned)
8032 // Conversion could affect comparison.
8033 return nullptr;
8034 }
8035 }
8036
8037 // Otherwise, we can potentially simplify the comparison. We know that it
8038 // will always come through as an integer value and we know the constant is
8039 // not a NAN (it would have been previously simplified).
8040 assert(!RHS->isNaN() && "NaN comparison not already folded!");
8041
8042 ICmpInst::Predicate Pred;
8043 switch (I.getPredicate()) {
8044 default:
8045 llvm_unreachable("Unexpected predicate!");
8046 case FCmpInst::FCMP_UEQ:
8047 case FCmpInst::FCMP_OEQ:
8048 Pred = ICmpInst::ICMP_EQ;
8049 break;
8050 case FCmpInst::FCMP_UGT:
8051 case FCmpInst::FCMP_OGT:
8052 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
8053 break;
8054 case FCmpInst::FCMP_UGE:
8055 case FCmpInst::FCMP_OGE:
8056 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
8057 break;
8058 case FCmpInst::FCMP_ULT:
8059 case FCmpInst::FCMP_OLT:
8060 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
8061 break;
8062 case FCmpInst::FCMP_ULE:
8063 case FCmpInst::FCMP_OLE:
8064 Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
8065 break;
8066 case FCmpInst::FCMP_UNE:
8067 case FCmpInst::FCMP_ONE:
8068 Pred = ICmpInst::ICMP_NE;
8069 break;
8070 case FCmpInst::FCMP_ORD:
8071 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8072 case FCmpInst::FCMP_UNO:
8073 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8074 }
8075
8076 // Now we know that the APFloat is a normal number, zero or inf.
8077
8078 // See if the FP constant is too large for the integer. For example,
8079 // comparing an i8 to 300.0.
8080 if (!LHSUnsigned) {
8081 // If the RHS value is > SignedMax, fold the comparison. This handles +INF
8082 // and large values.
8083 APFloat SMax(RHS->getSemantics());
8084 SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
8085 APFloat::rmNearestTiesToEven);
8086 if (SMax < *RHS) { // smax < 13123.0
8087 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
8088 Pred == ICmpInst::ICMP_SLE)
8089 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8090 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8091 }
8092 } else {
8093 // If the RHS value is > UnsignedMax, fold the comparison. This handles
8094 // +INF and large values.
8095 APFloat UMax(RHS->getSemantics());
8096 UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
8097 APFloat::rmNearestTiesToEven);
8098 if (UMax < *RHS) { // umax < 13123.0
8099 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
8100 Pred == ICmpInst::ICMP_ULE)
8101 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8102 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8103 }
8104 }
8105
8106 if (!LHSUnsigned) {
8107 // See if the RHS value is < SignedMin.
8108 APFloat SMin(RHS->getSemantics());
8109 SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
8110 APFloat::rmNearestTiesToEven);
8111 if (SMin > *RHS) { // smin > 12312.0
8112 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
8113 Pred == ICmpInst::ICMP_SGE)
8114 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8115 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8116 }
8117 } else {
8118 // See if the RHS value is < UnsignedMin.
8119 APFloat UMin(RHS->getSemantics());
8120 UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
8121 APFloat::rmNearestTiesToEven);
8122 if (UMin > *RHS) { // umin > 12312.0
8123 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
8124 Pred == ICmpInst::ICMP_UGE)
8125 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8126 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8127 }
8128 }
8129
8130 // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
8131 // [0, UMAX], but it may still be fractional. Check whether this is the case
8132 // using the IsExact flag.
8133 // Don't do this for zero, because -0.0 is not fractional.
8134 APSInt RHSInt(IntWidth, LHSUnsigned);
8135 bool IsExact;
8136 RHS->convertToInteger(RHSInt, APFloat::rmTowardZero, &IsExact);
8137 if (!RHS->isZero()) {
8138 if (!IsExact) {
8139 // If we had a comparison against a fractional value, we have to adjust
8140 // the compare predicate and sometimes the value. RHSC is rounded towards
8141 // zero at this point.
8142 switch (Pred) {
8143 default:
8144 llvm_unreachable("Unexpected integer comparison!");
8145 case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
8146 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8147 case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
8148 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8149 case ICmpInst::ICMP_ULE:
8150 // (float)int <= 4.4 --> int <= 4
8151 // (float)int <= -4.4 --> false
8152 if (RHS->isNegative())
8153 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8154 break;
8155 case ICmpInst::ICMP_SLE:
8156 // (float)int <= 4.4 --> int <= 4
8157 // (float)int <= -4.4 --> int < -4
8158 if (RHS->isNegative())
8159 Pred = ICmpInst::ICMP_SLT;
8160 break;
8161 case ICmpInst::ICMP_ULT:
8162 // (float)int < -4.4 --> false
8163 // (float)int < 4.4 --> int <= 4
8164 if (RHS->isNegative())
8165 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8166 Pred = ICmpInst::ICMP_ULE;
8167 break;
8168 case ICmpInst::ICMP_SLT:
8169 // (float)int < -4.4 --> int < -4
8170 // (float)int < 4.4 --> int <= 4
8171 if (!RHS->isNegative())
8172 Pred = ICmpInst::ICMP_SLE;
8173 break;
8174 case ICmpInst::ICMP_UGT:
8175 // (float)int > 4.4 --> int > 4
8176 // (float)int > -4.4 --> true
8177 if (RHS->isNegative())
8178 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8179 break;
8180 case ICmpInst::ICMP_SGT:
8181 // (float)int > 4.4 --> int > 4
8182 // (float)int > -4.4 --> int >= -4
8183 if (RHS->isNegative())
8184 Pred = ICmpInst::ICMP_SGE;
8185 break;
8186 case ICmpInst::ICMP_UGE:
8187 // (float)int >= -4.4 --> true
8188 // (float)int >= 4.4 --> int > 4
8189 if (RHS->isNegative())
8190 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8191 Pred = ICmpInst::ICMP_UGT;
8192 break;
8193 case ICmpInst::ICMP_SGE:
8194 // (float)int >= -4.4 --> int >= -4
8195 // (float)int >= 4.4 --> int > 4
8196 if (!RHS->isNegative())
8197 Pred = ICmpInst::ICMP_SGT;
8198 break;
8199 }
8200 }
8201 }
8202
8203 // Lower this FP comparison into an appropriate integer version of the
8204 // comparison.
8205 return new ICmpInst(Pred, LHSI->getOperand(0),
8206 ConstantInt::get(LHSI->getOperand(0)->getType(), RHSInt));
8207}
8208
8209/// Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
8210 static Instruction *foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI,
8211 Constant *RHSC) {
8212 // When C is not 0.0 and infinities are not allowed:
8213 // (C / X) < 0.0 is a sign-bit test of X
8214 // (C / X) < 0.0 --> X < 0.0 (if C is positive)
8215 // (C / X) < 0.0 --> X > 0.0 (if C is negative, swap the predicate)
8216 //
8217 // Proof:
8218 // Multiply (C / X) < 0.0 by X * X / C.
8219 // - X is non-zero; if X were zero, the 'ninf' flag would be violated.
8220 // - C defines the sign of X * X * C. Thus it also defines whether to swap
8221 // the predicate. C is also non zero by definition.
8222 //
8223 // Thus X * X / C is non zero and the transformation is valid. [qed]
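// For example, with 'ninf' on the fdiv and the fcmp:
// fcmp olt (fdiv 2.0, X), 0.0 --> fcmp olt X, 0.0
// fcmp olt (fdiv -2.0, X), 0.0 --> fcmp ogt X, 0.0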
8224
8225 FCmpInst::Predicate Pred = I.getPredicate();
8226
8227 // Check that predicates are valid.
8228 if ((Pred != FCmpInst::FCMP_OGT) && (Pred != FCmpInst::FCMP_OLT) &&
8229 (Pred != FCmpInst::FCMP_OGE) && (Pred != FCmpInst::FCMP_OLE))
8230 return nullptr;
8231
8232 // Check that RHS operand is zero.
8233 if (!match(RHSC, m_AnyZeroFP()))
8234 return nullptr;
8235
8236 // Check fastmath flags ('ninf').
8237 if (!LHSI->hasNoInfs() || !I.hasNoInfs())
8238 return nullptr;
8239
8240 // Check the properties of the dividend. It must not be zero to avoid a
8241 // division by zero (see Proof).
8242 const APFloat *C;
8243 if (!match(LHSI->getOperand(0), m_APFloat(C)))
8244 return nullptr;
8245
8246 if (C->isZero())
8247 return nullptr;
8248
8249 // Get swapped predicate if necessary.
8250 if (C->isNegative())
8251 Pred = I.getSwappedPredicate();
8252
8253 return new FCmpInst(Pred, LHSI->getOperand(1), RHSC, "", &I);
8254}
8255
8256// Transform 'fptrunc(x) cmp C' to 'x cmp ext(C)' if possible.
8257// Patterns include:
8258// fptrunc(x) < C --> x < ext(C)
8259// fptrunc(x) <= C --> x <= ext(C)
8260// fptrunc(x) > C --> x > ext(C)
8261// fptrunc(x) >= C --> x >= ext(C)
8262// where 'ext(C)' is the extension of 'C' to the type of 'x' with a small bias
8263// due to precision loss.
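// For example, for 'fptrunc double X to float' compared with 1.0:
// fptrunc(X) ogt 1.0 --> X ogt M
// where M is roughly the double midway between 1.0 and the next float,
// since every double at or below that midpoint truncates to at most 1.0f.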
8264 static Instruction *foldFCmpFpTrunc(FCmpInst &I, const Instruction &FPTrunc,
8265 const Constant &C) {
8266 FCmpInst::Predicate Pred = I.getPredicate();
8267 bool RoundDown = false;
8268
8269 if (Pred == FCmpInst::FCMP_OGE || Pred == FCmpInst::FCMP_UGE ||
8270 Pred == FCmpInst::FCMP_OLT || Pred == FCmpInst::FCMP_ULT)
8271 RoundDown = true;
8272 else if (Pred == FCmpInst::FCMP_OGT || Pred == FCmpInst::FCMP_UGT ||
8273 Pred == FCmpInst::FCMP_OLE || Pred == FCmpInst::FCMP_ULE)
8274 RoundDown = false;
8275 else
8276 return nullptr;
8277
8278 const APFloat *CValue;
8279 if (!match(&C, m_APFloat(CValue)))
8280 return nullptr;
8281
8282 if (CValue->isNaN() || CValue->isInfinity())
8283 return nullptr;
8284
8285 auto ConvertFltSema = [](const APFloat &Src, const fltSemantics &Sema) {
8286 bool LosesInfo;
8287 APFloat Dest = Src;
8288 Dest.convert(Sema, APFloat::rmNearestTiesToEven, &LosesInfo);
8289 return Dest;
8290 };
8291
8292 auto NextValue = [](const APFloat &Value, bool RoundDown) {
8293 APFloat NextValue = Value;
8294 NextValue.next(RoundDown);
8295 return NextValue;
8296 };
8297
8298 APFloat NextCValue = NextValue(*CValue, RoundDown);
8299
8300 Type *DestType = FPTrunc.getOperand(0)->getType();
8301 const fltSemantics &DestFltSema =
8302 DestType->getScalarType()->getFltSemantics();
8303
8304 APFloat ExtCValue = ConvertFltSema(*CValue, DestFltSema);
8305 APFloat ExtNextCValue = ConvertFltSema(NextCValue, DestFltSema);
8306
8307 // When 'NextCValue' is infinity, use an imagined 'NextCValue' that equals
8308 // 'CValue + bias' to avoid the infinity after conversion. The bias is
8309 // estimated as 'CValue - PrevCValue', where 'PrevCValue' is the previous
8310 // value of 'CValue'.
8311 if (NextCValue.isInfinity()) {
8312 APFloat PrevCValue = NextValue(*CValue, !RoundDown);
8313 APFloat Bias = ConvertFltSema(*CValue - PrevCValue, DestFltSema);
8314
8315 ExtNextCValue = ExtCValue + Bias;
8316 }
8317
8318 APFloat ExtMidValue =
8319 scalbn(ExtCValue + ExtNextCValue, -1, APFloat::rmNearestTiesToEven);
8320
8321 const fltSemantics &SrcFltSema =
8322 C.getType()->getScalarType()->getFltSemantics();
8323
8324 // 'MidValue' might be rounded to 'NextCValue'. Correct it here.
8325 APFloat MidValue = ConvertFltSema(ExtMidValue, SrcFltSema);
8326 if (MidValue != *CValue)
8327 ExtMidValue.next(!RoundDown);
8328
8329 // Check whether 'ExtMidValue' is a valid result, since the assumption on
8330 // the imagined 'NextCValue' might not hold for new float types.
8331 // ppc_fp128 cannot pass this check when converting from the largest float
8332 // because of the APFloat implementation.
8333 if (NextCValue.isInfinity()) {
8334 // ExtMidValue --- narrowed ---> Finite
8335 if (ConvertFltSema(ExtMidValue, SrcFltSema).isInfinity())
8336 return nullptr;
8337
8338 // NextExtMidValue --- narrowed ---> Infinity
8339 APFloat NextExtMidValue = NextValue(ExtMidValue, RoundDown);
8340 if (ConvertFltSema(NextExtMidValue, SrcFltSema).isFinite())
8341 return nullptr;
8342 }
8343
8344 return new FCmpInst(Pred, FPTrunc.getOperand(0),
8345 ConstantFP::get(DestType, ExtMidValue), "", &I);
8346}
8347
8348/// Optimize fabs(X) compared with zero.
8349 static Instruction *foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) {
8350 Value *X;
8351 if (!match(I.getOperand(0), m_FAbs(m_Value(X))))
8352 return nullptr;
8353
8354 const APFloat *C;
8355 if (!match(I.getOperand(1), m_APFloat(C)))
8356 return nullptr;
8357
8358 if (!C->isPosZero()) {
8359 if (!C->isSmallestNormalized())
8360 return nullptr;
8361
8362 const Function *F = I.getFunction();
8363 DenormalMode Mode = F->getDenormalMode(C->getSemantics());
8364 if (Mode.Input == DenormalMode::PreserveSign ||
8365 Mode.Input == DenormalMode::PositiveZero) {
8366
8367 auto replaceFCmp = [](FCmpInst *I, FCmpInst::Predicate P, Value *X) {
8368 Constant *Zero = ConstantFP::getZero(X->getType());
8369 return new FCmpInst(P, X, Zero, "", I);
8370 };
8371
8372 switch (I.getPredicate()) {
8373 case FCmpInst::FCMP_OLT:
8374 // fcmp olt fabs(x), smallest_normalized_number -> fcmp oeq x, 0.0
8375 return replaceFCmp(&I, FCmpInst::FCMP_OEQ, X);
8376 case FCmpInst::FCMP_UGE:
8377 // fcmp uge fabs(x), smallest_normalized_number -> fcmp une x, 0.0
8378 return replaceFCmp(&I, FCmpInst::FCMP_UNE, X);
8379 case FCmpInst::FCMP_OGE:
8380 // fcmp oge fabs(x), smallest_normalized_number -> fcmp one x, 0.0
8381 return replaceFCmp(&I, FCmpInst::FCMP_ONE, X);
8382 case FCmpInst::FCMP_ULT:
8383 // fcmp ult fabs(x), smallest_normalized_number -> fcmp ueq x, 0.0
8384 return replaceFCmp(&I, FCmpInst::FCMP_UEQ, X);
8385 default:
8386 break;
8387 }
8388 }
8389
8390 return nullptr;
8391 }
8392
8393 auto replacePredAndOp0 = [&IC](FCmpInst *I, FCmpInst::Predicate P, Value *X) {
8394 I->setPredicate(P);
8395 return IC.replaceOperand(*I, 0, X);
8396 };
8397
8398 switch (I.getPredicate()) {
8399 case FCmpInst::FCMP_UGE:
8400 case FCmpInst::FCMP_OLT:
8401 // fabs(X) >= 0.0 --> true
8402 // fabs(X) < 0.0 --> false
8403 llvm_unreachable("fcmp should have simplified");
8404
8405 case FCmpInst::FCMP_OGT:
8406 // fabs(X) > 0.0 --> X != 0.0
8407 return replacePredAndOp0(&I, FCmpInst::FCMP_ONE, X);
8408
8409 case FCmpInst::FCMP_UGT:
8410 // fabs(X) u> 0.0 --> X u!= 0.0
8411 return replacePredAndOp0(&I, FCmpInst::FCMP_UNE, X);
8412
8413 case FCmpInst::FCMP_OLE:
8414 // fabs(X) <= 0.0 --> X == 0.0
8415 return replacePredAndOp0(&I, FCmpInst::FCMP_OEQ, X);
8416
8417 case FCmpInst::FCMP_ULE:
8418 // fabs(X) u<= 0.0 --> X u== 0.0
8419 return replacePredAndOp0(&I, FCmpInst::FCMP_UEQ, X);
8420
8421 case FCmpInst::FCMP_OGE:
8422 // fabs(X) >= 0.0 --> !isnan(X)
8423 assert(!I.hasNoNaNs() && "fcmp should have simplified");
8424 return replacePredAndOp0(&I, FCmpInst::FCMP_ORD, X);
8425
8426 case FCmpInst::FCMP_ULT:
8427 // fabs(X) u< 0.0 --> isnan(X)
8428 assert(!I.hasNoNaNs() && "fcmp should have simplified");
8429 return replacePredAndOp0(&I, FCmpInst::FCMP_UNO, X);
8430
8431 case FCmpInst::FCMP_OEQ:
8432 case FCmpInst::FCMP_UEQ:
8433 case FCmpInst::FCMP_ONE:
8434 case FCmpInst::FCMP_UNE:
8435 case FCmpInst::FCMP_ORD:
8436 case FCmpInst::FCMP_UNO:
8437 // Look through the fabs() because it doesn't change anything but the sign.
8438 // fabs(X) == 0.0 --> X == 0.0,
8439 // fabs(X) != 0.0 --> X != 0.0
8440 // isnan(fabs(X)) --> isnan(X)
8441 // !isnan(fabs(X)) --> !isnan(X)
8442 return replacePredAndOp0(&I, I.getPredicate(), X);
8443
8444 default:
8445 return nullptr;
8446 }
8447}
8448
8449/// Optimize sqrt(X) compared with zero.
8450 static Instruction *foldSqrtWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) {
8451 Value *X;
8452 if (!match(I.getOperand(0), m_Sqrt(m_Value(X))))
8453 return nullptr;
8454
8455 if (!match(I.getOperand(1), m_PosZeroFP()))
8456 return nullptr;
8457
8458 auto ReplacePredAndOp0 = [&](FCmpInst::Predicate P) {
8459 I.setPredicate(P);
8460 return IC.replaceOperand(I, 0, X);
8461 };
8462
8463 // Clear ninf flag if sqrt doesn't have it.
8464 if (!cast<Instruction>(I.getOperand(0))->hasNoInfs())
8465 I.setHasNoInfs(false);
8466
8467 switch (I.getPredicate()) {
8468 case FCmpInst::FCMP_OLT:
8469 case FCmpInst::FCMP_UGE:
8470 // sqrt(X) < 0.0 --> false
8471 // sqrt(X) u>= 0.0 --> true
8472 llvm_unreachable("fcmp should have simplified");
8473 case FCmpInst::FCMP_ULT:
8474 case FCmpInst::FCMP_ULE:
8475 case FCmpInst::FCMP_OGT:
8476 case FCmpInst::FCMP_OGE:
8477 case FCmpInst::FCMP_OEQ:
8478 case FCmpInst::FCMP_UNE:
8479 // sqrt(X) u< 0.0 --> X u< 0.0
8480 // sqrt(X) u<= 0.0 --> X u<= 0.0
8481 // sqrt(X) > 0.0 --> X > 0.0
8482 // sqrt(X) >= 0.0 --> X >= 0.0
8483 // sqrt(X) == 0.0 --> X == 0.0
8484 // sqrt(X) u!= 0.0 --> X u!= 0.0
8485 return IC.replaceOperand(I, 0, X);
8486
8487 case FCmpInst::FCMP_OLE:
8488 // sqrt(X) <= 0.0 --> X == 0.0
8489 return ReplacePredAndOp0(FCmpInst::FCMP_OEQ);
8490 case FCmpInst::FCMP_UGT:
8491 // sqrt(X) u> 0.0 --> X u!= 0.0
8492 return ReplacePredAndOp0(FCmpInst::FCMP_UNE);
8493 case FCmpInst::FCMP_UEQ:
8494 // sqrt(X) u== 0.0 --> X u<= 0.0
8495 return ReplacePredAndOp0(FCmpInst::FCMP_ULE);
8496 case FCmpInst::FCMP_ONE:
8497 // sqrt(X) != 0.0 --> X > 0.0
8498 return ReplacePredAndOp0(FCmpInst::FCMP_OGT);
8499 case FCmpInst::FCMP_ORD:
8500 // !isnan(sqrt(X)) --> X >= 0.0
8501 return ReplacePredAndOp0(FCmpInst::FCMP_OGE);
8502 case FCmpInst::FCMP_UNO:
8503 // isnan(sqrt(X)) --> X u< 0.0
8504 return ReplacePredAndOp0(FCmpInst::FCMP_ULT);
8505 default:
8506 llvm_unreachable("Unexpected predicate!");
8507 }
8508}
8509
8510 static Instruction *foldFCmpFNegCommonOp(FCmpInst &I) {
8511 CmpInst::Predicate Pred = I.getPredicate();
8512 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
8513
8514 // Canonicalize fneg as Op1.
8515 if (match(Op0, m_FNeg(m_Value())) && !match(Op1, m_FNeg(m_Value()))) {
8516 std::swap(Op0, Op1);
8517 Pred = I.getSwappedPredicate();
8518 }
8519
8520 if (!match(Op1, m_FNeg(m_Specific(Op0))))
8521 return nullptr;
8522
8523 // Replace the negated operand with 0.0:
8524 // fcmp Pred Op0, -Op0 --> fcmp Pred Op0, 0.0
8525 Constant *Zero = ConstantFP::getZero(Op0->getType());
8526 return new FCmpInst(Pred, Op0, Zero, "", &I);
8527}
8528
8529 static Instruction *foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI,
8530 Constant *RHSC, InstCombinerImpl &CI) {
8531 const CmpInst::Predicate Pred = I.getPredicate();
8532 Value *X = LHSI->getOperand(0);
8533 Value *Y = LHSI->getOperand(1);
8534 switch (Pred) {
8535 default:
8536 break;
8537 case FCmpInst::FCMP_UGT:
8538 case FCmpInst::FCMP_ULT:
8539 case FCmpInst::FCMP_UNE:
8540 case FCmpInst::FCMP_OEQ:
8541 case FCmpInst::FCMP_OGE:
8542 case FCmpInst::FCMP_OLE:
8543 // The optimization is not valid if X and Y are infinities of the same
8544 // sign, i.e. the inf - inf = nan case. If the fsub has the ninf or nnan
8545 // flag then we can assume we do not have that case. Otherwise we might be
8546 // able to prove that either X or Y is not infinity.
8547 if (!LHSI->hasNoNaNs() && !LHSI->hasNoInfs() &&
8548 !isKnownNeverInfinity(Y,
8549 CI.getSimplifyQuery().getWithInstruction(&I)) &&
8550 !isKnownNeverInfinity(X, CI.getSimplifyQuery().getWithInstruction(&I)))
8551 break;
8552
8553 [[fallthrough]];
8554 case FCmpInst::FCMP_OGT:
8555 case FCmpInst::FCMP_OLT:
8556 case FCmpInst::FCMP_ONE:
8557 case FCmpInst::FCMP_UEQ:
8558 case FCmpInst::FCMP_UGE:
8559 case FCmpInst::FCMP_ULE:
8560 // fcmp pred (x - y), 0 --> fcmp pred x, y
8561 if (match(RHSC, m_AnyZeroFP()) &&
8562 I.getFunction()->getDenormalMode(
8563 LHSI->getType()->getScalarType()->getFltSemantics()) ==
8564 DenormalMode::getIEEE()) {
8565 CI.replaceOperand(I, 0, X);
8566 CI.replaceOperand(I, 1, Y);
8567 I.setHasNoInfs(LHSI->hasNoInfs());
8568 if (LHSI->hasNoNaNs())
8569 I.setHasNoNaNs(true);
8570 return &I;
8571 }
8572 break;
8573 }
8574
8575 return nullptr;
8576}
8577
8578 static Instruction *foldFCmpWithFloorAndCeil(FCmpInst &I,
8579 InstCombinerImpl &IC) {
8580 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
8581 Type *OpType = LHS->getType();
8582 CmpInst::Predicate Pred = I.getPredicate();
8583
8584 bool FloorX = match(LHS, m_Intrinsic<Intrinsic::floor>(m_Specific(RHS)));
8585 bool CeilX = match(LHS, m_Intrinsic<Intrinsic::ceil>(m_Specific(RHS)));
8586
8587 if (!FloorX && !CeilX) {
8588 if ((FloorX = match(RHS, m_Intrinsic<Intrinsic::floor>(m_Specific(LHS)))) ||
8589 (CeilX = match(RHS, m_Intrinsic<Intrinsic::ceil>(m_Specific(LHS))))) {
8590 std::swap(LHS, RHS);
8591 Pred = I.getSwappedPredicate();
8592 }
8593 }
8594
8595 switch (Pred) {
8596 case FCmpInst::FCMP_OLE:
8597 // fcmp ole floor(x), x => fcmp ord x, 0
8598 if (FloorX)
8600 "", &I);
8601 break;
8602 case FCmpInst::FCMP_OGT:
8603 // fcmp ogt floor(x), x => false
8604 if (FloorX)
8605 return IC.replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8606 break;
8607 case FCmpInst::FCMP_OGE:
8608 // fcmp oge ceil(x), x => fcmp ord x, 0
8609 if (CeilX)
8611 "", &I);
8612 break;
8613 case FCmpInst::FCMP_OLT:
8614 // fcmp olt ceil(x), x => false
8615 if (CeilX)
8616 return IC.replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8617 break;
8618 case FCmpInst::FCMP_ULE:
8619 // fcmp ule floor(x), x => true
8620 if (FloorX)
8621 return IC.replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8622 break;
8623 case FCmpInst::FCMP_UGT:
8624 // fcmp ugt floor(x), x => fcmp uno x, 0
8625 if (FloorX)
8627 "", &I);
8628 break;
8629 case FCmpInst::FCMP_UGE:
8630 // fcmp uge ceil(x), x => true
8631 if (CeilX)
8632 return IC.replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8633 break;
8634 case FCmpInst::FCMP_ULT:
8635 // fcmp ult ceil(x), x => fcmp uno x, 0
8636 if (CeilX)
8638 "", &I);
8639 break;
8640 default:
8641 break;
8642 }
8643
8644 return nullptr;
8645}
8646
8647 Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) {
8648 bool Changed = false;
8649
8650 /// Orders the operands of the compare so that they are listed from most
8651 /// complex to least complex. This puts constants before unary operators,
8652 /// before binary operators.
8653 if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
8654 I.swapOperands();
8655 Changed = true;
8656 }
8657
8658 const CmpInst::Predicate Pred = I.getPredicate();
8659 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
8660 if (Value *V = simplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
8661 SQ.getWithInstruction(&I)))
8662 return replaceInstUsesWith(I, V);
8663
8664 // Simplify 'fcmp pred X, X'
8665 Type *OpType = Op0->getType();
8666 assert(OpType == Op1->getType() && "fcmp with different-typed operands?");
8667 if (Op0 == Op1) {
8668 switch (Pred) {
8669 default:
8670 break;
8671 case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
8672 case FCmpInst::FCMP_ULT: // True if unordered or less than
8673 case FCmpInst::FCMP_UGT: // True if unordered or greater than
8674 case FCmpInst::FCMP_UNE: // True if unordered or not equal
8675 // Canonicalize these to be 'fcmp uno %X, 0.0'.
8676 I.setPredicate(FCmpInst::FCMP_UNO);
8677 I.setOperand(1, Constant::getNullValue(OpType));
8678 return &I;
8679
8680 case FCmpInst::FCMP_ORD: // True if ordered (no nans)
8681 case FCmpInst::FCMP_OEQ: // True if ordered and equal
8682 case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
8683 case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
8684 // Canonicalize these to be 'fcmp ord %X, 0.0'.
8685 I.setPredicate(FCmpInst::FCMP_ORD);
8686 I.setOperand(1, Constant::getNullValue(OpType));
8687 return &I;
8688 }
8689 }
8690
8691 if (I.isCommutative()) {
8692 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
8693 replaceOperand(I, 0, Pair->first);
8694 replaceOperand(I, 1, Pair->second);
8695 return &I;
8696 }
8697 }
8698
8699 // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
8700 // then canonicalize the operand to 0.0.
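// For example, if %y is known never NaN:
// fcmp ord %x, %y --> fcmp ord %x, 0.0
// fcmp uno %x, %y --> fcmp uno %x, 0.0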
8701 if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
8702 if (!match(Op0, m_PosZeroFP()) &&
8703 isKnownNeverNaN(Op0, getSimplifyQuery().getWithInstruction(&I)))
8704 return replaceOperand(I, 0, ConstantFP::getZero(OpType));
8705
8706 if (!match(Op1, m_PosZeroFP()) &&
8707 isKnownNeverNaN(Op1, getSimplifyQuery().getWithInstruction(&I)))
8708 return replaceOperand(I, 1, ConstantFP::getZero(OpType));
8709 }
8710
8711 // fcmp pred (fneg X), (fneg Y) -> fcmp swap(pred) X, Y
8712 Value *X, *Y;
8713 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
8714 return new FCmpInst(I.getSwappedPredicate(), X, Y, "", &I);
8715
8716 if (Instruction *R = foldFCmpFNegCommonOp(I))
8717 return R;
8718
8719 // Test if the FCmpInst instruction is used exclusively by a select as
8720 // part of a minimum or maximum operation. If so, refrain from doing
8721 // any other folding. This helps out other analyses which understand
8722 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
8723 // and CodeGen. And in this case, at least one of the comparison
8724 // operands has at least one user besides the compare (the select),
8725 // which would often largely negate the benefit of folding anyway.
8726 if (I.hasOneUse())
8727 if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
8728 Value *A, *B;
8729 SelectPatternResult SPR = matchSelectPattern(SI, A, B);
8730 if (SPR.Flavor != SPF_UNKNOWN)
8731 return nullptr;
8732 }
8733
8734 // The sign of 0.0 is ignored by fcmp, so canonicalize to +0.0:
8735 // fcmp Pred X, -0.0 --> fcmp Pred X, 0.0
8736 if (match(Op1, m_AnyZeroFP()) && !match(Op1, m_PosZeroFP()))
8737 return replaceOperand(I, 1, ConstantFP::getZero(OpType));
8738
8739 // Canonicalize:
8740 // fcmp olt X, +inf -> fcmp one X, +inf
8741 // fcmp ole X, +inf -> fcmp ord X, 0
8742 // fcmp ogt X, +inf -> false
8743 // fcmp oge X, +inf -> fcmp oeq X, +inf
8744 // fcmp ult X, +inf -> fcmp une X, +inf
8745 // fcmp ule X, +inf -> true
8746 // fcmp ugt X, +inf -> fcmp uno X, 0
8747 // fcmp uge X, +inf -> fcmp ueq X, +inf
8748 // fcmp olt X, -inf -> false
8749 // fcmp ole X, -inf -> fcmp oeq X, -inf
8750 // fcmp ogt X, -inf -> fcmp one X, -inf
8751 // fcmp oge X, -inf -> fcmp ord X, 0
8752 // fcmp ult X, -inf -> fcmp uno X, 0
8753 // fcmp ule X, -inf -> fcmp ueq X, -inf
8754 // fcmp ugt X, -inf -> fcmp une X, -inf
8755 // fcmp uge X, -inf -> true
8756 const APFloat *C;
8757 if (match(Op1, m_APFloat(C)) && C->isInfinity()) {
8758 switch (C->isNegative() ? FCmpInst::getSwappedPredicate(Pred) : Pred) {
8759 default:
8760 break;
8761 case FCmpInst::FCMP_ORD:
8762 case FCmpInst::FCMP_UNO:
8763 case FCmpInst::FCMP_TRUE:
8764 case FCmpInst::FCMP_FALSE:
8765 case FCmpInst::FCMP_OGT:
8766 case FCmpInst::FCMP_ULE:
8767 llvm_unreachable("Should be simplified by InstSimplify");
8768 case FCmpInst::FCMP_OLT:
8769 return new FCmpInst(FCmpInst::FCMP_ONE, Op0, Op1, "", &I);
8770 case FCmpInst::FCMP_OLE:
8771 return new FCmpInst(FCmpInst::FCMP_ORD, Op0, ConstantFP::getZero(OpType),
8772 "", &I);
8773 case FCmpInst::FCMP_OGE:
8774 return new FCmpInst(FCmpInst::FCMP_OEQ, Op0, Op1, "", &I);
8775 case FCmpInst::FCMP_ULT:
8776 return new FCmpInst(FCmpInst::FCMP_UNE, Op0, Op1, "", &I);
8777 case FCmpInst::FCMP_UGT:
8778 return new FCmpInst(FCmpInst::FCMP_UNO, Op0, ConstantFP::getZero(OpType),
8779 "", &I);
8780 case FCmpInst::FCMP_UGE:
8781 return new FCmpInst(FCmpInst::FCMP_UEQ, Op0, Op1, "", &I);
8782 }
8783 }
8784
8785 // Ignore signbit of bitcasted int when comparing equality to FP 0.0:
8786 // fcmp oeq/une (bitcast X), 0.0 --> (and X, SignMaskC) ==/!= 0
8787 if (match(Op1, m_PosZeroFP()) &&
8788 match(Op0, m_OneUse(m_ElementWiseBitCast(m_Value(X))))) {
8789 ICmpInst::Predicate IntPred = ICmpInst::BAD_ICMP_PREDICATE;
8790 if (Pred == FCmpInst::FCMP_OEQ)
8791 IntPred = ICmpInst::ICMP_EQ;
8792 else if (Pred == FCmpInst::FCMP_UNE)
8793 IntPred = ICmpInst::ICMP_NE;
8794
8795 if (IntPred != ICmpInst::BAD_ICMP_PREDICATE) {
8796 Type *IntTy = X->getType();
8797 const APInt &SignMask = ~APInt::getSignMask(IntTy->getScalarSizeInBits());
8798 Value *MaskX = Builder.CreateAnd(X, ConstantInt::get(IntTy, SignMask));
8799 return new ICmpInst(IntPred, MaskX, ConstantInt::getNullValue(IntTy));
8800 }
8801 }
8802
8803 // Handle fcmp with instruction LHS and constant RHS.
8804 Instruction *LHSI;
8805 Constant *RHSC;
8806 if (match(Op0, m_Instruction(LHSI)) && match(Op1, m_Constant(RHSC))) {
8807 switch (LHSI->getOpcode()) {
8808 case Instruction::Select:
8809 // fcmp eq (cond ? x : -x), 0 --> fcmp eq x, 0
8810 if (FCmpInst::isEquality(Pred) && match(RHSC, m_AnyZeroFP()) &&
8811 match(LHSI, m_c_Select(m_FNeg(m_Value(X)), m_Deferred(X))))
8812 return replaceOperand(I, 0, X);
8813 if (Instruction *NV = foldOpIntoSelect(I, cast<SelectInst>(LHSI)))
8814 return NV;
8815 break;
8816 case Instruction::FSub:
8817 if (LHSI->hasOneUse())
8818 if (Instruction *NV = foldFCmpFSubIntoFCmp(I, LHSI, RHSC, *this))
8819 return NV;
8820 break;
8821 case Instruction::PHI:
8822 if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
8823 return NV;
8824 break;
8825 case Instruction::SIToFP:
8826 case Instruction::UIToFP:
8827 if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
8828 return NV;
8829 break;
8830 case Instruction::FDiv:
8831 if (Instruction *NV = foldFCmpReciprocalAndZero(I, LHSI, RHSC))
8832 return NV;
8833 break;
8834 case Instruction::Load:
8835 if (auto *GEP = dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
8836 if (Instruction *Res =
8837 foldCmpLoadFromIndexedGlobal(cast<LoadInst>(LHSI), GEP, I))
8838 return Res;
8839 break;
8840 case Instruction::FPTrunc:
8841 if (Instruction *NV = foldFCmpFpTrunc(I, *LHSI, *RHSC))
8842 return NV;
8843 break;
8844 }
8845 }
8846
8847 if (Instruction *R = foldFabsWithFcmpZero(I, *this))
8848 return R;
8849
8850 if (Instruction *R = foldSqrtWithFcmpZero(I, *this))
8851 return R;
8852
8853 if (Instruction *R = foldFCmpWithFloorAndCeil(I, *this))
8854 return R;
8855
8856 if (match(Op0, m_FNeg(m_Value(X)))) {
8857 // fcmp pred (fneg X), C --> fcmp swap(pred) X, -C
8858 Constant *C;
8859 if (match(Op1, m_Constant(C)))
8860 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
8861 return new FCmpInst(I.getSwappedPredicate(), X, NegC, "", &I);
8862 }
8863
8864 // fcmp (fadd X, 0.0), Y --> fcmp X, Y
8865 if (match(Op0, m_FAdd(m_Value(X), m_AnyZeroFP())))
8866 return new FCmpInst(Pred, X, Op1, "", &I);
8867
8868 // fcmp X, (fadd Y, 0.0) --> fcmp X, Y
8869 if (match(Op1, m_FAdd(m_Value(Y), m_AnyZeroFP())))
8870 return new FCmpInst(Pred, Op0, Y, "", &I);
8871
8872 if (match(Op0, m_FPExt(m_Value(X)))) {
8873 // fcmp (fpext X), (fpext Y) -> fcmp X, Y
8874 if (match(Op1, m_FPExt(m_Value(Y))) && X->getType() == Y->getType())
8875 return new FCmpInst(Pred, X, Y, "", &I);
8876
8877 const APFloat *C;
8878 if (match(Op1, m_APFloat(C))) {
8879 const fltSemantics &FPSem =
8880 X->getType()->getScalarType()->getFltSemantics();
8881 bool Lossy;
8882 APFloat TruncC = *C;
8883 TruncC.convert(FPSem, APFloat::rmNearestTiesToEven, &Lossy);
8884
8885 if (Lossy) {
8886 // X can't possibly equal the higher-precision constant, so reduce any
8887 // equality comparison.
8888 // TODO: Other predicates can be handled via getFCmpCode().
8889 switch (Pred) {
8890 case FCmpInst::FCMP_OEQ:
8891 // X is ordered and equal to an impossible constant --> false
8892 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8893 case FCmpInst::FCMP_ONE:
8894 // X is ordered and not equal to an impossible constant --> ordered
8895 return new FCmpInst(FCmpInst::FCMP_ORD, X,
8896 ConstantFP::getZero(X->getType()));
8897 case FCmpInst::FCMP_UEQ:
8898 // X is unordered or equal to an impossible constant --> unordered
8899 return new FCmpInst(FCmpInst::FCMP_UNO, X,
8900 ConstantFP::getZero(X->getType()));
8901 case FCmpInst::FCMP_UNE:
8902 // X is unordered or not equal to an impossible constant --> true
8903 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8904 default:
8905 break;
8906 }
8907 }
8908
8909 // fcmp (fpext X), C -> fcmp X, (fptrunc C) if fptrunc is lossless
8910 // Avoid lossy conversions and denormals.
8911 // Zero is a special case that's OK to convert.
8912 APFloat Fabs = TruncC;
8913 Fabs.clearSign();
8914 if (!Lossy &&
8915 (Fabs.isZero() || !(Fabs < APFloat::getSmallestNormalized(FPSem)))) {
8916 Constant *NewC = ConstantFP::get(X->getType(), TruncC);
8917 return new FCmpInst(Pred, X, NewC, "", &I);
8918 }
8919 }
8920 }
8921
8922 // Convert a sign-bit test of an FP value into a cast and integer compare.
8923 // TODO: Simplify if the copysign constant is 0.0 or NaN.
8924 // TODO: Handle non-zero compare constants.
8925 // TODO: Handle other predicates.
8926 if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::copysign>(m_APFloat(C),
8927 m_Value(X)))) &&
8928 match(Op1, m_AnyZeroFP()) && !C->isZero() && !C->isNaN()) {
8929 Type *IntType = Builder.getIntNTy(X->getType()->getScalarSizeInBits());
8930 if (auto *VecTy = dyn_cast<VectorType>(OpType))
8931 IntType = VectorType::get(IntType, VecTy->getElementCount());
8932
8933 // copysign(non-zero constant, X) < 0.0 --> (bitcast X) < 0
8934 if (Pred == FCmpInst::FCMP_OLT) {
8935 Value *IntX = Builder.CreateBitCast(X, IntType);
8936 return new ICmpInst(ICmpInst::ICMP_SLT, IntX,
8937 ConstantInt::getNullValue(IntType));
8938 }
8939 }
8940
8941 {
8942 Value *CanonLHS = nullptr;
8943 match(Op0, m_Intrinsic<Intrinsic::canonicalize>(m_Value(CanonLHS)));
8944 // (canonicalize(x) == x) => (x == x)
8945 if (CanonLHS == Op1)
8946 return new FCmpInst(Pred, Op1, Op1, "", &I);
8947
8948 Value *CanonRHS = nullptr;
8949 match(Op1, m_Intrinsic<Intrinsic::canonicalize>(m_Value(CanonRHS)));
8950 // (x == canonicalize(x)) => (x == x)
8951 if (CanonRHS == Op0)
8952 return new FCmpInst(Pred, Op0, Op0, "", &I);
8953
8954 // (canonicalize(x) == canonicalize(y)) => (x == y)
8955 if (CanonLHS && CanonRHS)
8956 return new FCmpInst(Pred, CanonLHS, CanonRHS, "", &I);
8957 }
8958
8959 if (I.getType()->isVectorTy())
8960 if (Instruction *Res = foldVectorCmp(I, Builder))
8961 return Res;
8962
8963 return Changed ? &I : nullptr;
8964}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
Rewrite undef for PHI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define Check(C,...)
Hexagon Common GEP
static Instruction * foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
static Instruction * foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize fabs(X) compared with zero.
static void collectOffsetOp(Value *V, SmallVectorImpl< OffsetOp > &Offsets, bool AllowRecursion)
static Value * rewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags NW, const DataLayout &DL, SetVector< Value * > &Explored, InstCombiner &IC)
Returns a re-written value of Start as an indexed GEP using Base as a pointer.
static Instruction * foldICmpEqualityWithOffset(ICmpInst &I, InstCombiner::BuilderTy &Builder, const SimplifyQuery &SQ)
Offset both sides of an equality icmp to see if we can save some instructions: icmp eq/ne X,...
static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1+In2, returning true if the result overflowed for this type.
static Instruction * foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldVectorCmp(CmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q, unsigned Depth=0)
static Value * createLogicFromTable(const std::bitset< 4 > &Table, Value *Op0, Value *Op1, IRBuilderBase &Builder, bool HasOneUse)
static Instruction * foldICmpOfUAddOv(ICmpInst &I)
static bool isChainSelectCmpBranch(const SelectInst *SI)
Return true when the instruction sequence within a block is select-cmp-br.
static Instruction * foldICmpInvariantGroup(ICmpInst &I)
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static Instruction * foldReductionIdiom(ICmpInst &I, InstCombiner::BuilderTy &Builder, const DataLayout &DL)
This function folds patterns produced by lowering of reduce idioms, such as llvm.vector....
static Instruction * canonicalizeICmpBool(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Integer compare with boolean values can always be turned into bitwise ops.
static Instruction * foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI, Constant *RHSC, InstCombinerImpl &CI)
static Value * foldICmpOrXorSubChain(ICmpInst &Cmp, BinaryOperator *Or, InstCombiner::BuilderTy &Builder)
Fold icmp eq/ne (or (xor/sub (X1, X2), xor/sub (X3, X4))), 0.
static bool hasBranchUse(ICmpInst &I)
Given an icmp instruction, return true if any use of this comparison is a branch on sign bit comparis...
static Value * foldICmpWithLowBitMaskedVal(CmpPredicate Pred, Value *Op0, Value *Op1, const SimplifyQuery &Q, InstCombiner &IC)
Some comparisons can be simplified.
static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth)
When performing a comparison against a constant, it is possible that not all the bits in the LHS are ...
static Instruction * foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl, const APInt &C)
Fold icmp (shl nuw C2, Y), C.
static Instruction * foldFCmpWithFloorAndCeil(FCmpInst &I, InstCombinerImpl &IC)
static Instruction * foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldICmpOfCmpIntrinsicWithConstant(CmpPredicate Pred, IntrinsicInst *I, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * processUMulZExtIdiom(ICmpInst &I, Value *MulVal, const APInt *OtherVal, InstCombinerImpl &IC)
Recognize and process idiom involving test for multiplication overflow.
static Instruction * foldSqrtWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize sqrt(X) compared with zero.
static Instruction * foldFCmpFNegCommonOp(FCmpInst &I)
static Instruction * foldICmpWithHighBitMask(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static ICmpInst * canonicalizeCmpWithConstant(ICmpInst &I)
If we have an icmp le or icmp ge instruction with a constant operand, turn it into the appropriate ic...
static Instruction * foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
Fold an icmp with LLVM intrinsics.
static Instruction * foldICmpUSubSatOrUAddSatWithConstant(CmpPredicate Pred, SaturatingInst *II, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * foldICmpPow2Test(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1-In2, returning true if the result overflowed for this type.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags &NW, const DataLayout &DL, SetVector< Value * > &Explored)
Returns true if we can rewrite Start as a GEP with pointer Base and some integer offset.
static Instruction * foldFCmpFpTrunc(FCmpInst &I, const Instruction &FPTrunc, const Constant &C)
static Instruction * foldICmpXNegX(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static Instruction * processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B, ConstantInt *CI2, ConstantInt *CI1, InstCombinerImpl &IC)
The caller has matched a pattern of the form: I = icmp ugt (add (add A, B), CI2), CI1 If this is of t...
static Value * foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ, InstCombiner::BuilderTy &Builder)
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C)
Returns true if the exploded icmp can be expressed as a signed comparison to zero and updates the pre...
static Instruction * transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, const DataLayout &DL, InstCombiner &IC)
Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
static Instruction * foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs, const APInt &CRhs, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q)
static void setInsertionPoint(IRBuilder<> &Builder, Value *V, bool Before=true)
static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS, bool IsSigned)
static bool isMultipleOf(Value *X, const APInt &C, const SimplifyQuery &Q)
Return true if X is a multiple of C.
static Value * foldICmpWithTruncSignExtendedVal(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Some comparisons can be simplified.
static Instruction * foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition Lint.cpp:539
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define T1
uint64_t IntrinsicInst * II
#define P(N)
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
Value * RHS
Value * LHS
BinaryOperator * Mul
static constexpr roundingMode rmTowardZero
Definition APFloat.h:348
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition APFloat.cpp:6053
void clearSign()
Definition APFloat.h:1280
bool isNaN() const
Definition APFloat.h:1429
bool isZero() const
Definition APFloat.h:1427
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
Definition APFloat.h:1140
APInt bitcastToAPInt() const
Definition APFloat.h:1335
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
Definition APFloat.h:1120
opStatus next(bool nextDown)
Definition APFloat.h:1236
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Definition APFloat.h:1080
LLVM_ABI FPClassTest classify() const
Return the FPClassTest which will return true for the value.
Definition APFloat.cpp:5982
opStatus roundToIntegral(roundingMode RM)
Definition APFloat.h:1230
bool isInfinity() const
Definition APFloat.h:1428
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition APInt.cpp:1573
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition APInt.cpp:1758
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
Definition APInt.h:450
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1012
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
Definition APInt.h:230
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition APInt.h:424
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1541
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1513
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition APInt.h:207
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1331
APInt abs() const
Get the absolute value.
Definition APInt.h:1796
unsigned ceilLogBase2() const
Definition APInt.h:1765
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1202
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:372
LLVM_ABI APInt usub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1948
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition APInt.h:1183
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
Definition APInt.h:467
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1489
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1112
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
Definition APInt.h:210
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
Definition APInt.h:217
bool isNegative() const
Determine sign of this APInt.
Definition APInt.h:330
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1928
bool eq(const APInt &RHS) const
Equality comparison.
Definition APInt.h:1080
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition APInt.cpp:1644
LLVM_ABI APInt uadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1935
void negate()
Negate this APInt in place.
Definition APInt.h:1469
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1640
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition APInt.h:1599
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition APInt.h:220
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition APInt.h:357
void flipAllBits()
Toggle every bit to its opposite value.
Definition APInt.h:1453
unsigned countl_one() const
Count the number of leading one bits.
Definition APInt.h:1616
unsigned logBase2() const
Definition APInt.h:1762
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition APInt.h:476
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:828
bool isMaxSignedValue() const
Determine if this is the largest signed value.
Definition APInt.h:406
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
APInt shl(unsigned shiftAmt) const
Left-shift function.
Definition APInt.h:874
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition APInt.h:297
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
Definition APInt.h:1238
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1941
bool isOne() const
Determine if this is a value of 1.
Definition APInt.h:390
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
Definition APInt.h:287
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:240
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:852
unsigned countr_one() const
Count the number of trailing one bits.
Definition APInt.h:1657
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition APInt.h:1222
An arbitrary precision integer that knows its signedness.
Definition APSInt.h:24
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Conditional or Unconditional Branch instruction.
Value * getArgOperand(unsigned i) const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:982
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:858
static LLVM_ABI Predicate getFlippedStrictnessPredicate(Predicate pred)
This is a static version that you can use without an instruction available.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
bool isSigned() const
Definition InstrTypes.h:930
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
static LLVM_ABI bool isStrictPredicate(Predicate predicate)
This is a static version that you can use without an instruction available.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, OGT -> ULE, OLT -> UGE, etc.
Definition InstrTypes.h:789
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isUnsigned() const
Definition InstrTypes.h:936
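A small sketch of how these predicate accessors relate, assuming an existing ICmpInst *I:

  ICmpInst::Predicate Pred = I->getPredicate();        // say ICMP_SLT
  ICmpInst::Predicate Inv  = I->getInversePredicate(); // ICMP_SGE
  ICmpInst::Predicate Swap = I->getSwappedPredicate(); // ICMP_SGT
  // Strictness round-trips: SLT -> SLE (non-strict) -> SLT (strict).
  Pred = CmpInst::getStrictPredicate(CmpInst::getNonStrictPredicate(Pred));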
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign information.
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPointerBitCastOrAddrSpaceCast(Constant *C, Type *Ty)
Create a BitCast or AddrSpaceCast for a pointer type depending on the address space.
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getXor(Constant *C1, Constant *C2)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the limit value.
Definition Constants.h:264
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
Definition Constants.h:214
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition Constants.h:131
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:157
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:154
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
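A brief sketch of these accessors (Ctx is an assumed LLVMContext &):

  ConstantInt *CI = ConstantInt::getSigned(IntegerType::get(Ctx, 16), -5);
  const APInt &V  = CI->getValue();            // 16-bit two's-complement -5
  unsigned W      = CI->getBitWidth();         // 16
  ConstantInt *T  = ConstantInt::getTrue(Ctx); // the i1 value 1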
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this range and a value in Other.
LLVM_ABI std::optional< ConstantRange > exactUnionWith(const ConstantRange &CR) const
Union the two ranges and return the result if it can be represented exactly, otherwise return std::nullopt.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
LLVM_ABI ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
LLVM_ABI ConstantRange difference(const ConstantRange &CR) const
Subtract the specified range from this range (aka relative complement of the sets).
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current type.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with any value contained within Other.
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI std::optional< ConstantRange > exactIntersectWith(const ConstantRange &CR) const
Intersect the two ranges and return the result if it can be represented exactly, otherwise return std::nullopt.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this range and a value in Other.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
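makeExactICmpRegion and getEquivalentICmp act as inverses whenever the region is exactly representable. A minimal sketch with arbitrary 32-bit constants:

  ConstantRange CR = ConstantRange::makeExactICmpRegion(
      ICmpInst::ICMP_ULT, APInt(32, 8));       // the half-open range [0, 8)
  CmpInst::Predicate Pred;
  APInt RHS(32, 0);
  if (CR.getEquivalentICmp(Pred, RHS)) {
    // Pred == ICMP_ULT and RHS == 8: the range converts back exactly.
  }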
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI const APInt & getUniqueInteger() const
If C is a constant integer then return its value, otherwise C must be a vector of constant integers, all equal, and the common value is returned.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
A parsed version of the target data layout string, and methods for querying it.
Definition DataLayout.h:63
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This instruction compares its operands according to the predicate given to the constructor.
static bool isEquality(Predicate Pred)
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
bool isInBounds() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
Definition Operator.h:430
LLVM_ABI Type * getSourceElementType() const
Definition Operator.cpp:71
Value * getPointerOperand()
Definition Operator.h:457
GEPNoWrapFlags getNoWrapFlags() const
Definition Operator.h:425
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
Definition Operator.h:504
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
This instruction compares its operands according to the predicate given to the constructor.
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1551
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2442
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1573
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition IRBuilder.h:538
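A sketch of these builder calls as a fold might emit them, assuming Builder is the combiner's IRBuilder and X, Y are i32 values already in hand:

  Value *Masked = Builder.CreateAnd(X, Builder.getInt(APInt(32, 0xFF)));
  Value *IsEq   = Builder.CreateICmp(ICmpInst::ICMP_EQ, Masked, Y, "cmp");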
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition IRBuilder.h:2788
Instruction * foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr, const APInt &C)
Fold icmp ({al}shr X, Y), C.
Instruction * foldICmpWithZextOrSext(ICmpInst &ICmp)
Instruction * foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select, ConstantInt *C)
Instruction * foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Instruction * foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C.
Instruction * foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or, const APInt &C)
Fold icmp (or X, Y), C.
Instruction * foldICmpTruncWithTruncOrExt(ICmpInst &Cmp, const SimplifyQuery &Q)
Fold icmp (trunc nuw/nsw X), (trunc nuw/nsw Y).
Instruction * foldSignBitTest(ICmpInst &I)
Fold equality-comparison between zero and any (maybe truncated) right-shift by one-less-than-bitwidth into a sign test on the original value.
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0, see if we can fold the instruction into the PHI (which is only possible if all operands to the PHI are constants).
Value * insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi, bool isSigned, bool Inside)
Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise (V < Lo || V >= Hi).
Instruction * foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ)
Try to fold icmp (binop), X or icmp X, (binop).
Instruction * foldCmpLoadFromIndexedGlobal(LoadInst *LI, GetElementPtrInst *GEP, CmpInst &ICI, ConstantInt *AndCst=nullptr)
This is called when we see this pattern: cmp pred (load (gep GV, ...)), cmpcst where GV is a global variable with a constant initializer.
Instruction * foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub, const APInt &C)
Fold icmp (sub X, Y), C.
Instruction * foldICmpWithClamp(ICmpInst &Cmp, Value *X, MinMaxIntrinsic *Min)
Match and fold patterns like: icmp eq/ne X, min(max(X, Lo), Hi) which represents a range check and ca...
Instruction * foldICmpInstWithConstantNotInt(ICmpInst &Cmp)
Handle icmp with constant (but not simple integer constant) RHS.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible, updating it in place.
Instruction * foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (shl AP2, A), AP1)" -> (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Value * reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *Sh0, const SimplifyQuery &SQ, bool AnalyzeForSignBitExtraction=false)
Instruction * foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an equality icmp with LLVM intrinsic and constant operand.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand, try to fold the operation into the select arms.
Value * foldMultiplicationOverflowCheck(ICmpInst &Cmp)
Fold (-1 u/ x) u< y and ((x * y) ?/ x) != y to @llvm.?mul.with.overflow(x, y) plus extraction of the overflow bit.
Instruction * foldICmpWithConstant(ICmpInst &Cmp)
Fold icmp Pred X, C.
CmpInst * canonicalizeICmpPredicate(CmpInst &I)
If we have a comparison with a non-canonical predicate, if we can update all the users, invert the predicate and adjust all the users.
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * foldICmpWithZero(ICmpInst &Cmp)
Instruction * foldICmpCommutative(CmpPredicate Pred, Value *Op0, Value *Op1, ICmpInst &CxtI)
Instruction * foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp equality instruction with binary operator LHS and constant RHS: icmp eq/ne BO, C.
Instruction * foldICmpUsingBoolRange(ICmpInst &I)
If one operand of an icmp is effectively a bool (value range of {0,1}), then try to reduce patterns based on that limited range.
Instruction * foldICmpWithTrunc(ICmpInst &Cmp)
Instruction * foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS, ConstantInt *&Less, ConstantInt *&Equal, ConstantInt *&Greater)
Match a select chain which produces one of three values based on whether the LHS is less than, equal to, or greater than RHS respectively.
Instruction * visitFCmpInst(FCmpInst &I)
Instruction * foldICmpUsingKnownBits(ICmpInst &Cmp)
Try to fold the comparison based on range information we can get by checking whether bits are known to be zero or one.
Instruction * foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div, const APInt &C)
Fold icmp ({su}div X, Y), C.
Instruction * foldIRemByPowerOfTwoToBitTest(ICmpInst &I)
If we have: icmp eq/ne (urem/srem x, y), 0 iff y is a power-of-two, we can replace this with a bit test.
Instruction * foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold fcmp ([us]itofp x, cst) if possible.
Instruction * foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Fold icmp (udiv X, Y), C.
Instruction * foldICmpAddOpConst(Value *X, const APInt &C, CmpPredicate Pred)
Fold "icmp pred (X+C), X".
Instruction * foldICmpWithCastOp(ICmpInst &ICmp)
Handle icmp (cast x), (cast or constant).
Instruction * foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc, const APInt &C)
Fold icmp (trunc X), C.
Instruction * foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add, const APInt &C)
Fold icmp (add X, Y), C.
Instruction * foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul, const APInt &C)
Fold icmp (mul X, Y), C.
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Instruction * foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
Fold icmp (xor X, Y), C.
Instruction * foldSelectICmp(CmpPredicate Pred, SelectInst *SI, Value *RHS, const ICmpInst &I)
Instruction * foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp, const APInt &C)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instruction.
Instruction * foldIsMultipleOfAPowerOfTwo(ICmpInst &Cmp)
Fold icmp eq (num + mask) & ~mask, num to icmp eq (and num, mask), 0, where mask is a low-bit mask.
Instruction * foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1, const APInt &C2)
Fold icmp (and (sh X, Y), C2), C1.
Instruction * foldICmpBinOpWithConstantViaTruthTable(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Instruction * foldICmpInstWithConstant(ICmpInst &Cmp)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instruction.
Instruction * foldICmpXorShiftConst(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
For power-of-2 C: ((X s>> ShiftC) ^ X) u< C --> (X + C) u< (C << 1), and ((X s>> ShiftC) ^ X) u> (C - 1) --> (X + C) u> ((C << 1) - 1).
Instruction * foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl, const APInt &C)
Fold icmp (shl X, Y), C.
Instruction * foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And, const APInt &C)
Fold icmp (and X, Y), C.
Instruction * foldICmpEquality(ICmpInst &Cmp)
Instruction * foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax, Value *Z, CmpPredicate Pred)
Fold icmp Pred min|max(X, Y), Z.
bool dominatesAllUses(const Instruction *DI, const Instruction *UI, const BasicBlock *DB) const
True when DB dominates all uses of DI except UI.
bool foldAllocaCmp(AllocaInst *Alloca)
Instruction * visitICmpInst(ICmpInst &I)
OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const
Instruction * foldICmpWithDominatingICmp(ICmpInst &Cmp)
Canonicalize icmp instructions based on dominating conditions.
bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp, const unsigned SIOpd)
Try to replace select with select operand SIOpd in SI-ICmp sequence.
Instruction * foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" -> (icmp eq/ne A, Log2(AP2/AP1)) -> (icmp eq/ne A,...
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
Instruction * foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1)
Fold icmp (and X, C2), C1.
Instruction * foldICmpBitCast(ICmpInst &Cmp)
Instruction * foldGEPICmp(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, Instruction &I)
Fold comparisons between a GEP instruction and something else.
The core instruction combiner logic.
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
SimplifyQuery SQ
unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI, bool IsNSW=false) const
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
TargetLibraryInfo & TLI
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms.
const DataLayout & DL
DomConditionCache DC
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser)
Given i1 V, can every user of V be freely adapted if V is changed to !V ?
void addToWorklist(Instruction *I)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
DominatorTree & DT
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
BuilderTy & Builder
OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
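The visitors above conventionally either return a replacement instruction or route through replaceInstUsesWith. A hedged sketch of the entry pattern of visitICmpInst, assuming an InstCombiner context (member names per the entries above):

  if (Value *V = simplifyICmpInst(I.getCmpPredicate(), I.getOperand(0),
                                  I.getOperand(1), SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);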
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoInfs() const LLVM_READONLY
Determine whether the no-infs flag is set.
bool isArithmeticShift() const
Return true if this is an arithmetic shift right.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative.
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isShift() const
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
This class represents min/max intrinsics.
Value * getLHS() const
Value * getRHS() const
static bool isMin(Intrinsic::ID ID)
Whether the intrinsic is a smin or umin.
static bool isSigned(Intrinsic::ID ID)
Whether the intrinsic is signed or unsigned.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have (use 0 if you really have no idea).
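A minimal sketch of building a two-input PHI with these calls (Ty, V0/V1, and BB0/BB1 are assumed to exist):

  PHINode *PN = PHINode::Create(Ty, /*NumReservedValues=*/2, "phi");
  PN->addIncoming(V0, BB0);
  PN->addIncoming(V1, BB1);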
Represents a saturating add/sub intrinsic.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
A vector that has set insertion semantics.
Definition SetVector.h:58
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:101
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:149
bool contains(const key_type &key) const
Check if the SetVector contains the given key.
Definition SetVector.h:250
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class represents a truncation of integer types.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
Definition Type.h:165
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old number of lanes.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
LLVM_ABI int getFPMantissaWidth() const
Return the width of the mantissa of this type.
Definition Type.cpp:235
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:106
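Sketch of the width queries above: halve the scalar width of an integer or integer-vector type while keeping the lane count (Ty is assumed to be such a type):

  unsigned Half = Ty->getScalarSizeInBits() / 2;
  Type *Narrow = Ty->getWithNewBitWidth(Half); // i64 -> i32, <4 x i64> -> <4 x i32>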
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
void setOperand(unsigned i, Value *Val)
Definition User.h:237
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
Definition Value.cpp:158
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
iterator_range< use_iterator > uses()
Definition Value.h:380
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
const ParentTy * getParent() const
Definition ilist_node.h:34
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A unsigned-divided by B, rounded by the given rounding mode.
Definition APInt.cpp:2763
LLVM_ABI APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A signed-divided by B, rounded by the given rounding mode.
Definition APInt.cpp:2781
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right-shift operations (lshr and ashr).
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat.
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match() expression.
NoWrapTrunc_match< OpTy, TruncInst::NoSignedWrap > m_NSWTrunc(const OpTy &Op)
Matches trunc nsw.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< is_negated_power2_or_zero > m_NegatedPower2OrZero()
Match an integer or vector that is a negated power-of-2 or zero.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
cst_pred_ty< is_lowbit_mask_or_zero > m_LowBitMaskOrZero()
Match an integer or vector with only the low bit(s) set, or zero.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Signum_match< Val_t > m_Signum(const Val_t &V)
Matches a signum pattern.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
UAddWithOverflow_match< LHS_t, RHS_t, Sum_t > m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S)
Match an icmp instruction checking for unsigned overflow on addition.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
CastInst_match< OpTy, FPTruncInst > m_FPTrunc(const OpTy &Op)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_unless< Ty > m_Unless(const Ty &M)
Match if the inner matcher does NOT match.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
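These matchers compose into the match() calls that drive most folds in this file. A hedged sketch that recognizes "icmp eq (and X, C), 0", assuming V is some Value* in hand:

  Value *X;
  const APInt *C;
  CmpPredicate Pred;
  if (match(V, m_ICmp(Pred, m_And(m_Value(X), m_APInt(C)), m_Zero())) &&
      Pred == ICmpInst::ICMP_EQ) {
    // V is true exactly when the bits of X selected by C are all zero.
  }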
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:829
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
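A sketch of consuming this enum through the computeOverflowForUnsignedAdd entry listed earlier (an InstCombiner-style context and an OrigAdd instruction are assumed):

  OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, &OrigAdd);
  if (OR == OverflowResult::NeverOverflows) {
    // The add can be marked nuw, or a matching overflow check folded away.
  }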
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not an infinity or if the floating-point vector value has no infinity elements.
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
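For example, "icmp slt X, 0" is a pure sign test; a sketch with an arbitrary 32-bit width:

  bool TrueIfSigned;
  if (isSignBitCheck(ICmpInst::ICMP_SLT, APInt::getZero(32), TrueIfSigned)) {
    // TrueIfSigned == true: the icmp asks exactly "is the sign bit set?".
  }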
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI Value * stripNullTest(Value *V)
Returns the inner value X if the expression has the form f(X) where f(X) == 0 if and only if X == 0; otherwise returns nullptr.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:632
LLVM_ABI Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
Definition APFloat.h:1516
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
Definition bit.h:236
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from the base pointer (without adding in the base pointer).
Definition Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be negative (i.e. non-zero and negative).
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_UNKNOWN
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI LinearExpression decomposeLinearExpression(const DataLayout &DL, Value *Ptr)
Decompose a pointer into a linear expression.
Definition Loads.cpp:887
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
Returns: X * 2^Exp for integral exponents.
Definition APFloat.h:1525
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out parameter results if we successfully match.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
@ Other
Any other memory.
Definition ModRef.h:68
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
DWARFExpression::Operation Op
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (which is required to exist).
constexpr unsigned BitWidth
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined value of C.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a range.
Definition STLExtras.h:1961
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has no NaN elements.
LLVM_ABI std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition STLExtras.h:2108
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
@ Continue
Definition DWP.h:22
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address intrinsics from the specified value.
LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be positive (i.e. non-zero and non-negative).
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
std::optional< DecomposedBitTest > decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate Pred, bool LookThroughTrunc=true, bool AllowNonZeroC=false, bool DecomposeAnd=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
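A hedged sketch of the decomposition: for a power-of-two bound, "X u< 4" is equivalent to the bit test "(X & ~3) == 0" (X is assumed to be an i32 Value*):

  if (auto Res = decomposeBitTestICmp(X, ConstantInt::get(X->getType(), 4),
                                      ICmpInst::ICMP_ULT)) {
    // Res describes the equivalent "(X & Mask) pred C" form named above.
  }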
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
#define NC
Definition regutils.h:42
Value * materialize(InstCombiner::BuilderTy &Builder) const
static OffsetResult value(Value *V)
static OffsetResult select(Value *Cond, Value *TrueV, Value *FalseV)
static OffsetResult invalid()
This callback is used in conjunction with PointerMayBeCaptured.
static CommonPointerBase compute(Value *LHS, Value *RHS)
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
static constexpr DenormalMode getIEEE()
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition KnownBits.h:108
bool isZero() const
Returns true if value is all zero.
Definition KnownBits.h:80
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition KnownBits.h:242
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
Definition KnownBits.h:274
APInt getSignedMaxValue() const
Return the maximal signed value possible given these KnownBits.
Definition KnownBits.h:151
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition KnownBits.h:289
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
bool isConstant() const
Returns true if we know the value of all bits.
Definition KnownBits.h:54
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:248
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition KnownBits.h:145
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
Definition KnownBits.h:129
bool isStrictlyPositive() const
Returns true if this value is known to be strictly positive (non-zero and non-negative).
Definition KnownBits.h:114
bool isNegative() const
Returns true if this value is known to be negative.
Definition KnownBits.h:105
unsigned countMinPopulation() const
Returns the number of bits known to be one.
Definition KnownBits.h:286
APInt getSignedMinValue() const
Return the minimal signed value possible given these KnownBits.
Definition KnownBits.h:135
const APInt & getConstant() const
Returns the value when all bits have a known value.
Definition KnownBits.h:60
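A sketch of how these queries feed a fold like foldICmpUsingKnownBits, assuming an InstCombiner context with an icmp I, its operand Op0, and a constant bound *C:

  KnownBits Known(C->getBitWidth());
  computeKnownBits(Op0, Known, &I);   // the combiner's member form
  if (Known.getMaxValue().ult(*C))
    // Every possible value of Op0 is u< C, so "icmp ult Op0, C" is true.
    return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));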
Linear expression BasePtr + Index * Scale + Offset.
Definition Loads.h:203
GEPNoWrapFlags Flags
Definition Loads.h:208
Matching combinators.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
When implementing this min/max pattern as fcmp; select, does the fcmp have to be ordered?
const DataLayout & DL
const Instruction * CxtI
const DominatorTree * DT
SimplifyQuery getWithInstruction(const Instruction *I) const
AssumptionCache * AC
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:271
Capture information for a specific Use.