#define DEBUG_TYPE "aggressive-instcombine"

// Fill Ops with the operands of I that are relevant to the trunc expression
// dag: casts are leaves, binary operators contribute all of their operands.
static void getRelevantOperands(Instruction *I, SmallVectorImpl<Value *> &Ops) {
  switch (I->getOpcode()) {
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    Ops.append(I->op_begin(), I->op_end());
    break;
  default:
    llvm_unreachable("Unreachable!");
  }
}
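
// Illustrative example (not from this file): the kind of expression dag the
// pass reduces. Given
//
//   %za  = zext i8 %a to i32
//   %zb  = zext i8 %b to i32
//   %add = add i32 %za, %zb
//   %t   = trunc i32 %add to i16
//
// only the low 16 bits of %add are ever used, so the whole dag can be
// evaluated directly in i16.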

bool TruncInstCombine::buildTruncExpressionDag() {
  // ... (seed a local DFS worklist with the operand of CurrentTruncInst)
  while (!Worklist.empty()) {
    Value *Curr = Worklist.back();

    if (isa<Constant>(Curr)) {
      // Constants are always valid leaves of the dag.
      Worklist.pop_back();
      continue;
    }

    auto *I = dyn_cast<Instruction>(Curr);
    if (!I)
      return false;

    // ... (post-order step: once every operand of I has been visited, I is
    // popped and recorded in the info map)
    InstInfoMap.insert(std::make_pair(I, Info()));
    // ...

    if (InstInfoMap.count(I)) {
      // Node already visited.
      Worklist.pop_back();
      continue;
    }
    // ...
    unsigned Opc = I->getOpcode();
    switch (Opc) {
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // Casts terminate the dag; their operands are not visited.
      break;
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      SmallVector<Value *, 2> Operands;
      getRelevantOperands(I, Operands);
      for (Value *Operand : Operands)
        Worklist.push_back(Operand);
      break;
    }
    default:
      // Any other instruction is not supported; give up on this trunc.
      return false;
    }
  }
  return true;
}
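
// Note on ordering: InstInfoMap is populated here in post-order, i.e. an
// instruction is inserted only after all of its relevant operands. Later,
// ReduceExpressionDag() relies on this by walking the map forward (operands
// are reduced before their users), while the cleanup loop walks it backward
// (users are erased before their operands).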

unsigned TruncInstCombine::getMinBitWidth() {
  // ... (local DFS worklist, as in buildTruncExpressionDag)
  Value *Src = CurrentTruncInst->getOperand(0);
  Type *DstTy = CurrentTruncInst->getType();
  unsigned TruncBitWidth = DstTy->getScalarSizeInBits();
  unsigned OrigBitWidth = Src->getType()->getScalarSizeInBits();

  if (isa<Constant>(Src))
    return TruncBitWidth;

  Worklist.push_back(Src);
  InstInfoMap[cast<Instruction>(Src)].ValidBitWidth = TruncBitWidth;

  while (!Worklist.empty()) {
    Value *Curr = Worklist.back();

    if (isa<Constant>(Curr)) {
      Worklist.pop_back();
      continue;
    }

    // Otherwise, it must be an instruction that is already part of the dag.
    auto *I = cast<Instruction>(Curr);
    auto &Info = InstInfoMap[I];

    SmallVector<Value *, 2> Operands;
    getRelevantOperands(I, Operands);

    // ... (post-order step: once all operands of I have been handled, fold
    // their minimum bit widths into I's and pop it)
    for (auto *Operand : Operands)
      if (auto *IOp = dyn_cast<Instruction>(Operand))
        Info.MinBitWidth =
            std::max(Info.MinBitWidth, InstInfoMap[IOp].MinBitWidth);
    // ...

    unsigned ValidBitWidth = Info.ValidBitWidth;

    // Update the minimum bit width of I itself before visiting its operands.
    Info.MinBitWidth = std::max(Info.MinBitWidth, Info.ValidBitWidth);

    for (auto *Operand : Operands)
      if (auto *IOp = dyn_cast<Instruction>(Operand)) {
        // If the minimum bit width was already computed for this (or a larger)
        // valid bit width, keep the answer that is already there.
        unsigned IOpBitwidth = InstInfoMap.lookup(IOp).ValidBitWidth;
        if (IOpBitwidth >= ValidBitWidth)
          continue;
        InstInfoMap[IOp].ValidBitWidth = std::max(ValidBitWidth, IOpBitwidth);
        Worklist.push_back(IOp);
      }
  }

  unsigned MinBitWidth = InstInfoMap.lookup(cast<Instruction>(Src)).MinBitWidth;
  assert(MinBitWidth >= TruncBitWidth);

  if (MinBitWidth > TruncBitWidth) {
    // ... (round MinBitWidth up to the width of the smallest legal integer
    // type that can hold it, via DL.getSmallestLegalIntType)
  }

  // Do not shrink a scalar value whose original width is legal into an
  // illegal width.
  bool FromLegal = MinBitWidth == 1 || DL.isLegalInteger(OrigBitWidth);
  bool ToLegal = MinBitWidth == 1 || DL.isLegalInteger(MinBitWidth);
  if (!DstTy->isVectorTy() && FromLegal && !ToLegal)
    return OrigBitWidth;

  return MinBitWidth;
}
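
// Illustrative walk-through (not from the source): for the dag in the example
// above, ValidBitWidth propagates the 16 bits demanded by the trunc down
// through the add to the zexts, while MinBitWidth propagates back up the
// width each node really needs; both meet at 16, so the dag can be evaluated
// in i16 instead of i32.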

Type *TruncInstCombine::getBestTruncatedType() {
  if (!buildTruncExpressionDag())
    return nullptr;

  // We do not want to duplicate instructions, so a node with users outside the
  // dag blocks the shrink, unless it is an extension from the destination
  // type; all such extensions must agree on one DesiredBitWidth.
  unsigned DesiredBitWidth = 0;
  for (auto Itr : InstInfoMap) {
    Instruction *I = Itr.first;
    if (I->hasOneUse())
      continue;
    bool IsExtInst = (isa<ZExtInst>(I) || isa<SExtInst>(I));
    for (auto *U : I->users())
      if (auto *UI = dyn_cast<Instruction>(U))
        if (UI != CurrentTruncInst && !InstInfoMap.count(UI)) {
          if (!IsExtInst)
            return nullptr;
          unsigned ExtInstBitWidth =
              I->getOperand(0)->getType()->getScalarSizeInBits();
          if (DesiredBitWidth && DesiredBitWidth != ExtInstBitWidth)
            return nullptr;
          DesiredBitWidth = ExtInstBitWidth;
        }
  }

  unsigned OrigBitWidth =
      CurrentTruncInst->getOperand(0)->getType()->getScalarSizeInBits();

  unsigned MinBitWidth = getMinBitWidth();

  // Only profitable if we can shrink below the original width, and the result
  // must match DesiredBitWidth if one was imposed above.
  if (MinBitWidth >= OrigBitWidth ||
      (DesiredBitWidth && DesiredBitWidth != MinBitWidth))
    return nullptr;

  return IntegerType::get(CurrentTruncInst->getContext(), MinBitWidth);
}
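
// Illustrative note (not from the source): if %zx = zext i8 %x to i32 feeds
// both this dag and some unrelated user, shrinking is still allowed, but only
// down to i8 (the ext's source width), so the dag-internal use of %zx becomes
// %x itself and nothing has to be duplicated; that is what DesiredBitWidth
// enforces above.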

// Given a reduced scalar type Ty and a value V, return the reduced type for
// V: the vector version of Ty if V is a vector, otherwise Ty itself.
static Type *getReducedType(Value *V, Type *Ty) {
  assert(Ty && !Ty->isVectorTy() && "Expect Scalar Type");
  if (auto *VTy = dyn_cast<VectorType>(V->getType()))
    return VectorType::get(Ty, VTy->getElementCount());
  return Ty;
}
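
// For example, getReducedType(V, i16) returns <4 x i16> when V has type
// <4 x i32>, and plain i16 when V is a scalar.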

Value *TruncInstCombine::getReducedOperand(Value *V, Type *SclTy) {
  Type *Ty = getReducedType(V, SclTy);
  if (auto *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, false);
    // If we got a constant expression back, try to simplify it with DL info.
    if (Constant *FoldedC = ConstantFoldConstant(C, DL, &TLI))
      C = FoldedC;
    return C;
  }

  auto *I = cast<Instruction>(V);
  Info Entry = InstInfoMap.lookup(I);
  assert(Entry.NewValue);
  return Entry.NewValue;
}
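
// Illustrative: when reducing a dag to i16, a constant operand i32 42 simply
// becomes i16 42 through the integer cast above; ConstantFoldConstant only
// matters for more complex constant expressions, so no cast instruction is
// ever materialized for constants.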

void TruncInstCombine::ReduceExpressionDag(Type *SclTy) {
  for (auto &Itr : InstInfoMap) { // Forward order: operands before users.
    Instruction *I = Itr.first;
    auto &NodeInfo = Itr.second;

    assert(!NodeInfo.NewValue && "Instruction has been evaluated");

    IRBuilder<> Builder(I);
    Value *Res = nullptr;
    unsigned Opc = I->getOpcode();
    switch (Opc) {
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt: {
      Type *Ty = getReducedType(I, SclTy);
      // If the source of the cast already has the reduced type, the cast is
      // redundant and its operand is used directly.
      if (I->getOperand(0)->getType() == Ty) {
        assert(!isa<TruncInst>(I) && "Cannot reach here with TruncInst");
        NodeInfo.NewValue = I->getOperand(0);
        continue;
      }
      // Otherwise re-create the cast with the reduced destination type.
      Res = Builder.CreateIntCast(I->getOperand(0), Ty,
                                  Opc == Instruction::SExt);

      // Keep the TruncInst worklist in sync with the newly created cast.
      auto Entry = find(Worklist, I);
      if (Entry != Worklist.end()) {
        if (auto *NewCI = dyn_cast<TruncInst>(Res))
          *Entry = NewCI;
        else
          Worklist.erase(Entry);
      } else if (auto *NewCI = dyn_cast<TruncInst>(Res))
        Worklist.push_back(NewCI);
      break;
    }
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      Value *LHS = getReducedOperand(I->getOperand(0), SclTy);
      Value *RHS = getReducedOperand(I->getOperand(1), SclTy);
      Res = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
      break;
    }
    default:
      llvm_unreachable("Unhandled instruction");
    }

    NodeInfo.NewValue = Res;
    if (auto *ResI = dyn_cast<Instruction>(Res))
      ResI->takeName(I);
  }

  Value *Res = getReducedOperand(CurrentTruncInst->getOperand(0), SclTy);
  Type *DstTy = CurrentTruncInst->getType();
  if (Res->getType() != DstTy) {
    IRBuilder<> Builder(CurrentTruncInst);
    Res = Builder.CreateIntCast(Res, DstTy, false);
    if (auto *ResI = dyn_cast<Instruction>(Res))
      ResI->takeName(CurrentTruncInst);
  }
  CurrentTruncInst->replaceAllUsesWith(Res);

  // Erase the old expression dag. Iterate backward so that an instruction is
  // erased before its operands; an {S,Z}ExtInst may still have users outside
  // the dag, in which case it must be kept.
  CurrentTruncInst->eraseFromParent();
  for (auto I = InstInfoMap.rbegin(), E = InstInfoMap.rend(); I != E; ++I) {
    if (I->first->use_empty())
      I->first->eraseFromParent();
  }
}
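
// Illustrative before/after (not from the source), continuing the example
// above. Before:
//   %za  = zext i8 %a to i32
//   %zb  = zext i8 %b to i32
//   %add = add i32 %za, %zb
//   %t   = trunc i32 %add to i16
// After ReduceExpressionDag(i16):
//   %za  = zext i8 %a to i16
//   %zb  = zext i8 %b to i16
//   %add = add i16 %za, %zb      ; uses of %t now use %add; the old dag and
//                                ; the trunc are erased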

bool TruncInstCombine::run(Function &F) {
  bool MadeIRChange = false;

  // Collect all TruncInst in the function (skipping unreachable blocks) into
  // the worklist for evaluation.
  for (auto &BB : F) {
    if (!DT.isReachableFromEntry(&BB))
      continue;
    for (auto &I : BB)
      if (auto *CI = dyn_cast<TruncInst>(&I))
        Worklist.push_back(CI);
  }

  // For each collected trunc: check whether it dominates a reducible
  // expression dag and, if so, build and install the reduced dag.
  while (!Worklist.empty()) {
    CurrentTruncInst = Worklist.pop_back_val();

    if (Type *NewDstSclTy = getBestTruncatedType()) {
      LLVM_DEBUG(dbgs() << "ICE: TruncInstCombine reducing type of expression dag "
                        << CurrentTruncInst << '\n');
      ReduceExpressionDag(NewDstSclTy);
      MadeIRChange = true;
    }
  }

  return MadeIRChange;
}
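
// Usage sketch (an assumption, simplified): this utility is driven from the
// AggressiveInstCombine pass, which constructs it with the per-function
// analyses it needs and calls run(), along the lines of
//
//   TruncInstCombine TIC(TLI, DL, DT);
//   MadeChange |= TIC.run(F);
//
// The exact constructor arguments shown here are illustrative, not taken from
// this listing.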