1//===------- VectorCombine.cpp - Optimize partial vector operations -------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass optimizes scalar/vector interactions using target cost models. The
10// transforms implemented here may not fit in traditional loop-based or SLP
11// vectorization passes.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Transforms/Vectorize/VectorCombine.h"
16#include "llvm/ADT/DenseMap.h"
17#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/ScopeExit.h"
20#include "llvm/ADT/Statistic.h"
25#include "llvm/Analysis/Loads.h"
30#include "llvm/IR/Dominators.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/IRBuilder.h"
38#include <numeric>
39#include <optional>
40#include <queue>
41#include <set>
42
43#define DEBUG_TYPE "vector-combine"
44#include "llvm/Transforms/Utils/InstructionWorklist.h"
45
46using namespace llvm;
47using namespace llvm::PatternMatch;
48
49STATISTIC(NumVecLoad, "Number of vector loads formed");
50STATISTIC(NumVecCmp, "Number of vector compares formed");
51STATISTIC(NumVecBO, "Number of vector binops formed");
52STATISTIC(NumVecCmpBO, "Number of vector compare + binop formed");
53STATISTIC(NumShufOfBitcast, "Number of shuffles moved after bitcast");
54STATISTIC(NumScalarOps, "Number of scalar unary + binary ops formed");
55STATISTIC(NumScalarCmp, "Number of scalar compares formed");
56STATISTIC(NumScalarIntrinsic, "Number of scalar intrinsic calls formed");
57
58static cl::opt<bool> DisableVectorCombine(
59 "disable-vector-combine", cl::init(false), cl::Hidden,
60 cl::desc("Disable all vector combine transforms"));
61
62static cl::opt<bool> DisableBinopExtractShuffle(
63 "disable-binop-extract-shuffle", cl::init(false), cl::Hidden,
64 cl::desc("Disable binop extract to shuffle transforms"));
65
66static cl::opt<unsigned> MaxInstrsToScan(
67 "vector-combine-max-scan-instrs", cl::init(30), cl::Hidden,
68 cl::desc("Max number of instructions to scan for vector combining."));
69
70static const unsigned InvalidIndex = std::numeric_limits<unsigned>::max();
71
72namespace {
73class VectorCombine {
74public:
75 VectorCombine(Function &F, const TargetTransformInfo &TTI,
76 const DominatorTree &DT, AAResults &AA, AssumptionCache &AC,
77 const DataLayout *DL, TTI::TargetCostKind CostKind,
78 bool TryEarlyFoldsOnly)
79 : F(F), Builder(F.getContext(), InstSimplifyFolder(*DL)), TTI(TTI),
80 DT(DT), AA(AA), AC(AC), DL(DL), CostKind(CostKind), SQ(*DL),
81 TryEarlyFoldsOnly(TryEarlyFoldsOnly) {}
82
83 bool run();
84
85private:
86 Function &F;
87 IRBuilder<InstSimplifyFolder> Builder;
88 const TargetTransformInfo &TTI;
89 const DominatorTree &DT;
90 AAResults &AA;
91 AssumptionCache &AC;
92 const DataLayout *DL;
93 TTI::TargetCostKind CostKind;
94 const SimplifyQuery SQ;
95
96 /// If true, only perform beneficial early IR transforms. Do not introduce new
97 /// vector operations.
98 bool TryEarlyFoldsOnly;
99
100 InstructionWorklist Worklist;
101
102 /// Next instruction to iterate. It will be updated when it is erased by
103 /// RecursivelyDeleteTriviallyDeadInstructions.
104 Instruction *NextInst;
105
106 // TODO: Direct calls from the top-level "run" loop use a plain "Instruction"
107 // parameter. That should be updated to specific sub-classes because the
108 // run loop was changed to dispatch on opcode.
109 bool vectorizeLoadInsert(Instruction &I);
110 bool widenSubvectorLoad(Instruction &I);
111 ExtractElementInst *getShuffleExtract(ExtractElementInst *Ext0,
112 ExtractElementInst *Ext1,
113 unsigned PreferredExtractIndex) const;
114 bool isExtractExtractCheap(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
115 const Instruction &I,
116 ExtractElementInst *&ConvertToShuffle,
117 unsigned PreferredExtractIndex);
118 Value *foldExtExtCmp(Value *V0, Value *V1, Value *ExtIndex, Instruction &I);
119 Value *foldExtExtBinop(Value *V0, Value *V1, Value *ExtIndex, Instruction &I);
120 bool foldExtractExtract(Instruction &I);
121 bool foldInsExtFNeg(Instruction &I);
122 bool foldInsExtBinop(Instruction &I);
123 bool foldInsExtVectorToShuffle(Instruction &I);
124 bool foldBitOpOfCastops(Instruction &I);
125 bool foldBitOpOfCastConstant(Instruction &I);
126 bool foldBitcastShuffle(Instruction &I);
127 bool scalarizeOpOrCmp(Instruction &I);
128 bool scalarizeVPIntrinsic(Instruction &I);
129 bool foldExtractedCmps(Instruction &I);
130 bool foldBinopOfReductions(Instruction &I);
131 bool foldSingleElementStore(Instruction &I);
132 bool scalarizeLoadExtract(Instruction &I);
133 bool scalarizeExtExtract(Instruction &I);
134 bool foldConcatOfBoolMasks(Instruction &I);
135 bool foldPermuteOfBinops(Instruction &I);
136 bool foldShuffleOfBinops(Instruction &I);
137 bool foldShuffleOfSelects(Instruction &I);
138 bool foldShuffleOfCastops(Instruction &I);
139 bool foldShuffleOfShuffles(Instruction &I);
140 bool foldShuffleOfIntrinsics(Instruction &I);
141 bool foldShuffleToIdentity(Instruction &I);
142 bool foldShuffleFromReductions(Instruction &I);
143 bool foldShuffleChainsToReduce(Instruction &I);
144 bool foldCastFromReductions(Instruction &I);
145 bool foldSelectShuffle(Instruction &I, bool FromReduction = false);
146 bool foldInterleaveIntrinsics(Instruction &I);
147 bool shrinkType(Instruction &I);
148 bool shrinkLoadForShuffles(Instruction &I);
149 bool shrinkPhiOfShuffles(Instruction &I);
150
151 void replaceValue(Instruction &Old, Value &New, bool Erase = true) {
152 LLVM_DEBUG(dbgs() << "VC: Replacing: " << Old << '\n');
153 LLVM_DEBUG(dbgs() << " With: " << New << '\n');
154 Old.replaceAllUsesWith(&New);
155 if (auto *NewI = dyn_cast<Instruction>(&New)) {
156 New.takeName(&Old);
157 Worklist.pushUsersToWorkList(*NewI);
158 Worklist.pushValue(NewI);
159 }
160 if (Erase && isInstructionTriviallyDead(&Old)) {
161 eraseInstruction(Old);
162 } else {
163 Worklist.push(&Old);
164 }
165 }
166
167 void eraseInstruction(Instruction &I) {
168 LLVM_DEBUG(dbgs() << "VC: Erasing: " << I << '\n');
169 SmallVector<Value *> Ops(I.operands());
170 Worklist.remove(&I);
171 I.eraseFromParent();
172
173 // Push remaining users of the operands and then the operand itself - allows
174 // further folds that were hindered by OneUse limits.
175 SmallPtrSet<Value *, 4> Visited;
176 for (Value *Op : Ops) {
177 if (!Visited.contains(Op)) {
178 if (auto *OpI = dyn_cast<Instruction>(Op)) {
179 if (RecursivelyDeleteTriviallyDeadInstructions(
180 OpI, nullptr, nullptr, [&](Value *V) {
181 if (auto *I = dyn_cast<Instruction>(V)) {
182 LLVM_DEBUG(dbgs() << "VC: Erased: " << *I << '\n');
183 Worklist.remove(I);
184 if (I == NextInst)
185 NextInst = NextInst->getNextNode();
186 Visited.insert(I);
187 }
188 }))
189 continue;
190 Worklist.pushUsersToWorkList(*OpI);
191 Worklist.pushValue(OpI);
192 }
193 }
194 }
195 }
196};
197} // namespace
198
199/// Return the source operand of a potentially bitcasted value. If there is no
200/// bitcast, return the input value itself.
201static Value *peekThroughBitcasts(Value *V) {
202 while (auto *BitCast = dyn_cast<BitCastInst>(V))
203 V = BitCast->getOperand(0);
204 return V;
205}
206
207static bool canWidenLoad(LoadInst *Load, const TargetTransformInfo &TTI) {
208 // Do not widen load if atomic/volatile or under asan/hwasan/memtag/tsan.
209 // The widened load may load data from dirty regions or create data races
210 // non-existent in the source.
211 if (!Load || !Load->isSimple() || !Load->hasOneUse() ||
212 Load->getFunction()->hasFnAttribute(Attribute::SanitizeMemTag) ||
213 mustSuppressSpeculation(*Load))
214 return false;
215
216 // We are potentially transforming byte-sized (8-bit) memory accesses, so make
217 // sure we have all of our type-based constraints in place for this target.
218 Type *ScalarTy = Load->getType()->getScalarType();
219 uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
220 unsigned MinVectorSize = TTI.getMinVectorRegisterBitWidth();
221 if (!ScalarSize || !MinVectorSize || MinVectorSize % ScalarSize != 0 ||
222 ScalarSize % 8 != 0)
223 return false;
224
225 return true;
226}
227
228bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
229 // Match insert into fixed vector of scalar value.
230 // TODO: Handle non-zero insert index.
231 Value *Scalar;
232 if (!match(&I,
233 m_InsertElt(m_Undef(), m_Value(Scalar), m_ZeroInt())))
234 return false;
235
236 // Optionally match an extract from another vector.
237 Value *X;
238 bool HasExtract = match(Scalar, m_ExtractElt(m_Value(X), m_ZeroInt()));
239 if (!HasExtract)
240 X = Scalar;
241
242 auto *Load = dyn_cast<LoadInst>(X);
243 if (!canWidenLoad(Load, TTI))
244 return false;
245
246 Type *ScalarTy = Scalar->getType();
247 uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
248 unsigned MinVectorSize = TTI.getMinVectorRegisterBitWidth();
249
250 // Check safety of replacing the scalar load with a larger vector load.
251 // We use minimal alignment (maximum flexibility) because we only care about
252 // the dereferenceable region. When calculating cost and creating a new op,
253 // we may use a larger value based on alignment attributes.
254 Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
255 assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");
256
257 unsigned MinVecNumElts = MinVectorSize / ScalarSize;
258 auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
259 unsigned OffsetEltIndex = 0;
260 Align Alignment = Load->getAlign();
261 if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), *DL, Load, &AC,
262 &DT)) {
263 // It is not safe to load directly from the pointer, but we can still peek
264 // through gep offsets and check if it is safe to load from a base address with
265 // updated alignment. If it is, we can shuffle the element(s) into place
266 // after loading.
267 unsigned OffsetBitWidth = DL->getIndexTypeSizeInBits(SrcPtr->getType());
268 APInt Offset(OffsetBitWidth, 0);
269 SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
270
271 // We want to shuffle the result down from a high element of a vector, so
272 // the offset must be positive.
273 if (Offset.isNegative())
274 return false;
275
276 // The offset must be a multiple of the scalar element to shuffle cleanly
277 // in the element's size.
278 uint64_t ScalarSizeInBytes = ScalarSize / 8;
279 if (Offset.urem(ScalarSizeInBytes) != 0)
280 return false;
281
282 // If we load MinVecNumElts, will our target element still be loaded?
283 OffsetEltIndex = Offset.udiv(ScalarSizeInBytes).getZExtValue();
284 if (OffsetEltIndex >= MinVecNumElts)
285 return false;
286
287 if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), *DL, Load, &AC,
288 &DT))
289 return false;
290
291 // Update alignment with offset value. Note that the offset could be negated
292 // to more accurately represent "(new) SrcPtr - Offset = (old) SrcPtr", but
293 // negation does not change the result of the alignment calculation.
294 Alignment = commonAlignment(Alignment, Offset.getZExtValue());
295 }
296
297 // Original pattern: insertelt undef, load [free casts of] PtrOp, 0
298 // Use the greater of the alignment on the load or its source pointer.
299 Alignment = std::max(SrcPtr->getPointerAlignment(*DL), Alignment);
300 Type *LoadTy = Load->getType();
301 unsigned AS = Load->getPointerAddressSpace();
302 InstructionCost OldCost =
303 TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS, CostKind);
304 APInt DemandedElts = APInt::getOneBitSet(MinVecNumElts, 0);
305 OldCost +=
306 TTI.getScalarizationOverhead(MinVecTy, DemandedElts,
307 /* Insert */ true, HasExtract, CostKind);
308
309 // New pattern: load VecPtr
310 InstructionCost NewCost =
311 TTI.getMemoryOpCost(Instruction::Load, MinVecTy, Alignment, AS, CostKind);
312 // Optionally, we are shuffling the loaded vector element(s) into place.
313 // For the mask set everything but element 0 to undef to prevent poison from
314 // propagating from the extra loaded memory. This will also optionally
315 // shrink/grow the vector from the loaded size to the output size.
316 // We assume this operation has no cost in codegen if there was no offset.
317 // Note that we could use freeze to avoid poison problems, but then we might
318 // still need a shuffle to change the vector size.
319 auto *Ty = cast<FixedVectorType>(I.getType());
320 unsigned OutputNumElts = Ty->getNumElements();
321 SmallVector<int, 16> Mask(OutputNumElts, PoisonMaskElem);
322 assert(OffsetEltIndex < MinVecNumElts && "Address offset too big");
323 Mask[0] = OffsetEltIndex;
324 if (OffsetEltIndex)
325 NewCost += TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, MinVecTy, Mask,
326 CostKind);
327
328 // We can aggressively convert to the vector form because the backend can
329 // invert this transform if it does not result in a performance win.
330 if (OldCost < NewCost || !NewCost.isValid())
331 return false;
332
333 // It is safe and potentially profitable to load a vector directly:
334 // inselt undef, load Scalar, 0 --> load VecPtr
335 IRBuilder<> Builder(Load);
336 Value *CastedPtr =
337 Builder.CreatePointerBitCastOrAddrSpaceCast(SrcPtr, Builder.getPtrTy(AS));
338 Value *VecLd = Builder.CreateAlignedLoad(MinVecTy, CastedPtr, Alignment);
339 VecLd = Builder.CreateShuffleVector(VecLd, Mask);
340
341 replaceValue(I, *VecLd);
342 ++NumVecLoad;
343 return true;
344}
345
346/// If we are loading a vector and then inserting it into a larger vector with
347/// undefined elements, try to load the larger vector and eliminate the insert.
348/// This removes a shuffle in IR and may allow combining of other loaded values.
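/// A minimal illustration of the pattern this targets (names and types are
/// illustrative only):
///   %small = load <2 x float>, ptr %p
///   %wide  = shufflevector <2 x float> %small, <2 x float> poison,
///                          <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
/// --> (when the wider region at %p is known dereferenceable and cheap enough)
///   %wide  = load <4 x float>, ptr %p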
349bool VectorCombine::widenSubvectorLoad(Instruction &I) {
350 // Match subvector insert of fixed vector.
351 auto *Shuf = cast<ShuffleVectorInst>(&I);
352 if (!Shuf->isIdentityWithPadding())
353 return false;
354
355 // Allow a non-canonical shuffle mask that is choosing elements from op1.
356 unsigned NumOpElts =
357 cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
358 unsigned OpIndex = any_of(Shuf->getShuffleMask(), [&NumOpElts](int M) {
359 return M >= (int)(NumOpElts);
360 });
361
362 auto *Load = dyn_cast<LoadInst>(Shuf->getOperand(OpIndex));
363 if (!canWidenLoad(Load, TTI))
364 return false;
365
366 // We use minimal alignment (maximum flexibility) because we only care about
367 // the dereferenceable region. When calculating cost and creating a new op,
368 // we may use a larger value based on alignment attributes.
369 auto *Ty = cast<FixedVectorType>(I.getType());
370 Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
371 assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");
372 Align Alignment = Load->getAlign();
373 if (!isSafeToLoadUnconditionally(SrcPtr, Ty, Align(1), *DL, Load, &AC, &DT))
374 return false;
375
376 Alignment = std::max(SrcPtr->getPointerAlignment(*DL), Alignment);
377 Type *LoadTy = Load->getType();
378 unsigned AS = Load->getPointerAddressSpace();
379
380 // Original pattern: insert_subvector (load PtrOp)
381 // This conservatively assumes that the cost of a subvector insert into an
382 // undef value is 0. We could add that cost if the cost model accurately
383 // reflects the real cost of that operation.
384 InstructionCost OldCost =
385 TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS, CostKind);
386
387 // New pattern: load PtrOp
388 InstructionCost NewCost =
389 TTI.getMemoryOpCost(Instruction::Load, Ty, Alignment, AS, CostKind);
390
391 // We can aggressively convert to the vector form because the backend can
392 // invert this transform if it does not result in a performance win.
393 if (OldCost < NewCost || !NewCost.isValid())
394 return false;
395
396 IRBuilder<> Builder(Load);
397 Value *CastedPtr =
398 Builder.CreatePointerBitCastOrAddrSpaceCast(SrcPtr, Builder.getPtrTy(AS));
399 Value *VecLd = Builder.CreateAlignedLoad(Ty, CastedPtr, Alignment);
400 replaceValue(I, *VecLd);
401 ++NumVecLoad;
402 return true;
403}
404
405/// Determine which, if any, of the inputs should be replaced by a shuffle
406/// followed by extract from a different index.
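/// For example (illustrative), given extracts from lane 0 of %x and lane 3 of
/// %y on a target where extracting lane 3 is the more expensive of the two:
///   %e0 = extractelement <4 x i32> %x, i32 0
///   %e1 = extractelement <4 x i32> %y, i32 3
/// this returns the lane-3 extract, so the caller can rewrite it as a shuffle
/// that moves lane 3 into lane 0 followed by a cheap extract from lane 0.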
407ExtractElementInst *VectorCombine::getShuffleExtract(
408 ExtractElementInst *Ext0, ExtractElementInst *Ext1,
409 unsigned PreferredExtractIndex = InvalidIndex) const {
410 auto *Index0C = dyn_cast<ConstantInt>(Ext0->getIndexOperand());
411 auto *Index1C = dyn_cast<ConstantInt>(Ext1->getIndexOperand());
412 assert(Index0C && Index1C && "Expected constant extract indexes");
413
414 unsigned Index0 = Index0C->getZExtValue();
415 unsigned Index1 = Index1C->getZExtValue();
416
417 // If the extract indexes are identical, no shuffle is needed.
418 if (Index0 == Index1)
419 return nullptr;
420
421 Type *VecTy = Ext0->getVectorOperand()->getType();
422 assert(VecTy == Ext1->getVectorOperand()->getType() && "Need matching types");
423 InstructionCost Cost0 =
424 TTI.getVectorInstrCost(*Ext0, VecTy, CostKind, Index0);
425 InstructionCost Cost1 =
426 TTI.getVectorInstrCost(*Ext1, VecTy, CostKind, Index1);
427
428 // If both costs are invalid, no shuffle is needed.
429 if (!Cost0.isValid() && !Cost1.isValid())
430 return nullptr;
431
432 // We are extracting from 2 different indexes, so one operand must be shuffled
433 // before performing a vector operation and/or extract. The more expensive
434 // extract will be replaced by a shuffle.
435 if (Cost0 > Cost1)
436 return Ext0;
437 if (Cost1 > Cost0)
438 return Ext1;
439
440 // If the costs are equal and there is a preferred extract index, shuffle the
441 // opposite operand.
442 if (PreferredExtractIndex == Index0)
443 return Ext1;
444 if (PreferredExtractIndex == Index1)
445 return Ext0;
446
447 // Otherwise, replace the extract with the higher index.
448 return Index0 > Index1 ? Ext0 : Ext1;
449}
450
451/// Compare the relative costs of 2 extracts followed by scalar operation vs.
452/// vector operation(s) followed by extract. Return true if the existing
453/// instructions are cheaper than a vector alternative. Otherwise, return false
454/// and if one of the extracts should be transformed to a shufflevector, set
455/// \p ConvertToShuffle to that extract instruction.
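/// As a sketch of the comparison (operands are illustrative): the existing
///   %a = extractelement <4 x float> %x, i32 1
///   %b = extractelement <4 x float> %y, i32 1
///   %r = fadd float %a, %b
/// is costed as two extracts plus one scalar fadd, while the proposed
///   %v = fadd <4 x float> %x, %y
///   %r = extractelement <4 x float> %v, i32 1
/// is costed as one vector fadd plus one extract (plus extract costs retained
/// for extra uses, and a splat-shuffle cost if the extract indexes differ).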
456bool VectorCombine::isExtractExtractCheap(ExtractElementInst *Ext0,
457 ExtractElementInst *Ext1,
458 const Instruction &I,
459 ExtractElementInst *&ConvertToShuffle,
460 unsigned PreferredExtractIndex) {
461 auto *Ext0IndexC = dyn_cast<ConstantInt>(Ext0->getIndexOperand());
462 auto *Ext1IndexC = dyn_cast<ConstantInt>(Ext1->getIndexOperand());
463 assert(Ext0IndexC && Ext1IndexC && "Expected constant extract indexes");
464
465 unsigned Opcode = I.getOpcode();
466 Value *Ext0Src = Ext0->getVectorOperand();
467 Value *Ext1Src = Ext1->getVectorOperand();
468 Type *ScalarTy = Ext0->getType();
469 auto *VecTy = cast<VectorType>(Ext0Src->getType());
470 InstructionCost ScalarOpCost, VectorOpCost;
471
472 // Get cost estimates for scalar and vector versions of the operation.
473 bool IsBinOp = Instruction::isBinaryOp(Opcode);
474 if (IsBinOp) {
475 ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy, CostKind);
476 VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy, CostKind);
477 } else {
478 assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
479 "Expected a compare");
480 CmpInst::Predicate Pred = cast<CmpInst>(I).getPredicate();
481 ScalarOpCost = TTI.getCmpSelInstrCost(
482 Opcode, ScalarTy, CmpInst::makeCmpResultType(ScalarTy), Pred, CostKind);
483 VectorOpCost = TTI.getCmpSelInstrCost(
484 Opcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred, CostKind);
485 }
486
487 // Get cost estimates for the extract elements. These costs will factor into
488 // both sequences.
489 unsigned Ext0Index = Ext0IndexC->getZExtValue();
490 unsigned Ext1Index = Ext1IndexC->getZExtValue();
491
492 InstructionCost Extract0Cost =
493 TTI.getVectorInstrCost(*Ext0, VecTy, CostKind, Ext0Index);
494 InstructionCost Extract1Cost =
495 TTI.getVectorInstrCost(*Ext1, VecTy, CostKind, Ext1Index);
496
497 // A more expensive extract will always be replaced by a splat shuffle.
498 // For example, if Ext0 is more expensive:
499 // opcode (extelt V0, Ext0), (ext V1, Ext1) -->
500 // extelt (opcode (splat V0, Ext0), V1), Ext1
501 // TODO: Evaluate whether that always results in lowest cost. Alternatively,
502 // check the cost of creating a broadcast shuffle and shuffling both
503 // operands to element 0.
504 unsigned BestExtIndex = Extract0Cost > Extract1Cost ? Ext0Index : Ext1Index;
505 unsigned BestInsIndex = Extract0Cost > Extract1Cost ? Ext1Index : Ext0Index;
506 InstructionCost CheapExtractCost = std::min(Extract0Cost, Extract1Cost);
507
508 // Extra uses of the extracts mean that we include those costs in the
509 // vector total because those instructions will not be eliminated.
510 InstructionCost OldCost, NewCost;
511 if (Ext0Src == Ext1Src && Ext0Index == Ext1Index) {
512 // Handle a special case. If the 2 extracts are identical, adjust the
513 // formulas to account for that. The extra use charge allows for either the
514 // CSE'd pattern or an unoptimized form with identical values:
515 // opcode (extelt V, C), (extelt V, C) --> extelt (opcode V, V), C
516 bool HasUseTax = Ext0 == Ext1 ? !Ext0->hasNUses(2)
517 : !Ext0->hasOneUse() || !Ext1->hasOneUse();
518 OldCost = CheapExtractCost + ScalarOpCost;
519 NewCost = VectorOpCost + CheapExtractCost + HasUseTax * CheapExtractCost;
520 } else {
521 // Handle the general case. Each extract is actually a different value:
522 // opcode (extelt V0, C0), (extelt V1, C1) --> extelt (opcode V0, V1), C
523 OldCost = Extract0Cost + Extract1Cost + ScalarOpCost;
524 NewCost = VectorOpCost + CheapExtractCost +
525 !Ext0->hasOneUse() * Extract0Cost +
526 !Ext1->hasOneUse() * Extract1Cost;
527 }
528
529 ConvertToShuffle = getShuffleExtract(Ext0, Ext1, PreferredExtractIndex);
530 if (ConvertToShuffle) {
531 if (IsBinOp && DisableBinopExtractShuffle)
532 return true;
533
534 // If we are extracting from 2 different indexes, then one operand must be
535 // shuffled before performing the vector operation. The shuffle mask is
536 // poison except for 1 lane that is being translated to the remaining
537 // extraction lane. Therefore, it is a splat shuffle. Ex:
538 // ShufMask = { poison, poison, 0, poison }
539 // TODO: The cost model has an option for a "broadcast" shuffle
540 // (splat-from-element-0), but no option for a more general splat.
541 if (auto *FixedVecTy = dyn_cast<FixedVectorType>(VecTy)) {
542 SmallVector<int> ShuffleMask(FixedVecTy->getNumElements(),
543 PoisonMaskElem);
544 ShuffleMask[BestInsIndex] = BestExtIndex;
545 NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
546 VecTy, VecTy, ShuffleMask, CostKind, 0,
547 nullptr, {ConvertToShuffle});
548 } else {
549 NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
550 VecTy, VecTy, {}, CostKind, 0, nullptr,
551 {ConvertToShuffle});
552 }
553 }
554
555 // Aggressively form a vector op if the cost is equal because the transform
556 // may enable further optimization.
557 // Codegen can reverse this transform (scalarize) if it was not profitable.
558 return OldCost < NewCost;
559}
560
561/// Create a shuffle that translates (shifts) 1 element from the input vector
562/// to a new element location.
563static Value *createShiftShuffle(Value *Vec, unsigned OldIndex,
564 unsigned NewIndex, IRBuilderBase &Builder) {
565 // The shuffle mask is poison except for 1 lane that is being translated
566 // to the new element index. Example for OldIndex == 2 and NewIndex == 0:
567 // ShufMask = { 2, poison, poison, poison }
568 auto *VecTy = cast<FixedVectorType>(Vec->getType());
569 SmallVector<int, 32> ShufMask(VecTy->getNumElements(), PoisonMaskElem);
570 ShufMask[NewIndex] = OldIndex;
571 return Builder.CreateShuffleVector(Vec, ShufMask, "shift");
572}
573
574/// Given an extract element instruction with constant index operand, shuffle
575/// the source vector (shift the scalar element) to a NewIndex for extraction.
576/// Return null if the input can be constant folded, so that we are not creating
577/// unnecessary instructions.
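/// For example (illustrative), translating an extract of lane 2 to lane 0
/// returns only the shuffle; the caller creates the new extract:
///   %e = extractelement <4 x i32> %x, i32 2
/// -->
///   %shift = shufflevector <4 x i32> %x, <4 x i32> poison,
///                          <4 x i32> <i32 2, i32 poison, i32 poison, i32 poison>
///   %e = extractelement <4 x i32> %shift, i32 0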
578static Value *translateExtract(ExtractElementInst *ExtElt, unsigned NewIndex,
579 IRBuilderBase &Builder) {
580 // Shufflevectors can only be created for fixed-width vectors.
581 Value *X = ExtElt->getVectorOperand();
582 if (!isa<FixedVectorType>(X->getType()))
583 return nullptr;
584
585 // If the extract can be constant-folded, this code is unsimplified. Defer
586 // to other passes to handle that.
587 Value *C = ExtElt->getIndexOperand();
588 assert(isa<ConstantInt>(C) && "Expected a constant index operand");
589 if (isa<Constant>(X))
590 return nullptr;
591
592 Value *Shuf = createShiftShuffle(X, cast<ConstantInt>(C)->getZExtValue(),
593 NewIndex, Builder);
594 return Shuf;
595}
596
597/// Try to reduce extract element costs by converting scalar compares to vector
598/// compares followed by extract.
599/// cmp (ext0 V0, ExtIndex), (ext1 V1, ExtIndex)
600Value *VectorCombine::foldExtExtCmp(Value *V0, Value *V1, Value *ExtIndex,
601 Instruction &I) {
602 assert(isa<CmpInst>(&I) && "Expected a compare");
603
604 // cmp Pred (extelt V0, ExtIndex), (extelt V1, ExtIndex)
605 // --> extelt (cmp Pred V0, V1), ExtIndex
606 ++NumVecCmp;
607 CmpInst::Predicate Pred = cast<CmpInst>(&I)->getPredicate();
608 Value *VecCmp = Builder.CreateCmp(Pred, V0, V1);
609 return Builder.CreateExtractElement(VecCmp, ExtIndex, "foldExtExtCmp");
610}
611
612/// Try to reduce extract element costs by converting scalar binops to vector
613/// binops followed by extract.
614/// bo (ext0 V0, ExtIndex), (ext1 V1, ExtIndex)
615Value *VectorCombine::foldExtExtBinop(Value *V0, Value *V1, Value *ExtIndex,
616 Instruction &I) {
617 assert(isa<BinaryOperator>(&I) && "Expected a binary operator");
618
619 // bo (extelt V0, ExtIndex), (extelt V1, ExtIndex)
620 // --> extelt (bo V0, V1), ExtIndex
621 ++NumVecBO;
622 Value *VecBO = Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), V0,
623 V1, "foldExtExtBinop");
624
625 // All IR flags are safe to back-propagate because any potential poison
626 // created in unused vector elements is discarded by the extract.
627 if (auto *VecBOInst = dyn_cast<Instruction>(VecBO))
628 VecBOInst->copyIRFlags(&I);
629
630 return Builder.CreateExtractElement(VecBO, ExtIndex, "foldExtExtBinop");
631}
632
633/// Match an instruction with extracted vector operands.
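/// A minimal example of the rewrite (names are illustrative):
///   %a = extractelement <4 x i32> %x, i32 0
///   %b = extractelement <4 x i32> %y, i32 0
///   %r = add i32 %a, %b
/// -->
///   %v = add <4 x i32> %x, %y
///   %r = extractelement <4 x i32> %v, i32 0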
634bool VectorCombine::foldExtractExtract(Instruction &I) {
635 // It is not safe to transform things like div, urem, etc. because we may
636 // create undefined behavior when executing those on unknown vector elements.
637 if (!isSafeToSpeculativelyExecute(&I))
638 return false;
639
640 Instruction *I0, *I1;
641 CmpPredicate Pred = CmpInst::BAD_ICMP_PREDICATE;
642 if (!match(&I, m_Cmp(Pred, m_Instruction(I0), m_Instruction(I1))) &&
643 !match(&I, m_BinOp(m_Instruction(I0), m_Instruction(I1))))
644 return false;
645
646 Value *V0, *V1;
647 uint64_t C0, C1;
648 if (!match(I0, m_ExtractElt(m_Value(V0), m_ConstantInt(C0))) ||
649 !match(I1, m_ExtractElt(m_Value(V1), m_ConstantInt(C1))) ||
650 V0->getType() != V1->getType())
651 return false;
652
653 // If the scalar value 'I' is going to be re-inserted into a vector, then try
654 // to create an extract to that same element. The extract/insert can be
655 // reduced to a "select shuffle".
656 // TODO: If we add a larger pattern match that starts from an insert, this
657 // probably becomes unnecessary.
658 auto *Ext0 = cast<ExtractElementInst>(I0);
659 auto *Ext1 = cast<ExtractElementInst>(I1);
660 uint64_t InsertIndex = InvalidIndex;
661 if (I.hasOneUse())
662 match(I.user_back(),
663 m_InsertElt(m_Value(), m_Value(), m_ConstantInt(InsertIndex)));
664
665 ExtractElementInst *ExtractToChange;
666 if (isExtractExtractCheap(Ext0, Ext1, I, ExtractToChange, InsertIndex))
667 return false;
668
669 Value *ExtOp0 = Ext0->getVectorOperand();
670 Value *ExtOp1 = Ext1->getVectorOperand();
671
672 if (ExtractToChange) {
673 unsigned CheapExtractIdx = ExtractToChange == Ext0 ? C1 : C0;
674 Value *NewExtOp =
675 translateExtract(ExtractToChange, CheapExtractIdx, Builder);
676 if (!NewExtOp)
677 return false;
678 if (ExtractToChange == Ext0)
679 ExtOp0 = NewExtOp;
680 else
681 ExtOp1 = NewExtOp;
682 }
683
684 Value *ExtIndex = ExtractToChange == Ext0 ? Ext1->getIndexOperand()
685 : Ext0->getIndexOperand();
686 Value *NewExt = Pred != CmpInst::BAD_ICMP_PREDICATE
687 ? foldExtExtCmp(ExtOp0, ExtOp1, ExtIndex, I)
688 : foldExtExtBinop(ExtOp0, ExtOp1, ExtIndex, I);
689 Worklist.push(Ext0);
690 Worklist.push(Ext1);
691 replaceValue(I, *NewExt);
692 return true;
693}
694
695/// Try to replace an extract + scalar fneg + insert with a vector fneg +
696/// shuffle.
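/// A minimal example of the rewrite (names are illustrative):
///   %e = extractelement <4 x float> %src, i32 1
///   %n = fneg float %e
///   %r = insertelement <4 x float> %dst, float %n, i32 1
/// -->
///   %neg = fneg <4 x float> %src
///   %r   = shufflevector <4 x float> %dst, <4 x float> %neg,
///                        <4 x i32> <i32 0, i32 5, i32 2, i32 3>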
697bool VectorCombine::foldInsExtFNeg(Instruction &I) {
698 // Match an insert (op (extract)) pattern.
699 Value *DestVec;
700 uint64_t Index;
701 Instruction *FNeg;
702 if (!match(&I, m_InsertElt(m_Value(DestVec), m_OneUse(m_Instruction(FNeg)),
703 m_ConstantInt(Index))))
704 return false;
705
706 // Note: This handles the canonical fneg instruction and "fsub -0.0, X".
707 Value *SrcVec;
708 Instruction *Extract;
709 if (!match(FNeg, m_FNeg(m_CombineAnd(
710 m_Instruction(Extract),
711 m_ExtractElt(m_Value(SrcVec), m_SpecificInt(Index))))))
712 return false;
713
714 auto *VecTy = cast<FixedVectorType>(I.getType());
715 auto *ScalarTy = VecTy->getScalarType();
716 auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcVec->getType());
717 if (!SrcVecTy || ScalarTy != SrcVecTy->getScalarType())
718 return false;
719
720 // Ignore bogus insert/extract index.
721 unsigned NumElts = VecTy->getNumElements();
722 if (Index >= NumElts)
723 return false;
724
725 // We are inserting the negated element into the same lane that we extracted
726 // from. This is equivalent to a select-shuffle that chooses all but the
727 // negated element from the destination vector.
728 SmallVector<int> Mask(NumElts);
729 std::iota(Mask.begin(), Mask.end(), 0);
730 Mask[Index] = Index + NumElts;
731 InstructionCost OldCost =
732 TTI.getArithmeticInstrCost(Instruction::FNeg, ScalarTy, CostKind) +
733 TTI.getVectorInstrCost(I, VecTy, CostKind, Index);
734
735 // If the extract has one use, it will be eliminated, so count it in the
736 // original cost. If it has more than one use, ignore the cost because it will
737 // be the same before/after.
738 if (Extract->hasOneUse())
739 OldCost += TTI.getVectorInstrCost(*Extract, VecTy, CostKind, Index);
740
741 InstructionCost NewCost =
742 TTI.getArithmeticInstrCost(Instruction::FNeg, VecTy, CostKind) +
743 TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, VecTy, VecTy,
744 Mask, CostKind);
745
746 bool NeedLenChg = SrcVecTy->getNumElements() != NumElts;
747 // If the lengths of the two vectors are not equal,
748 // we need to add a length-change vector. Add this cost.
749 SmallVector<int> SrcMask;
750 if (NeedLenChg) {
751 SrcMask.assign(NumElts, PoisonMaskElem);
752 SrcMask[Index] = Index;
753 NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
754 VecTy, SrcVecTy, SrcMask, CostKind);
755 }
756
757 if (NewCost > OldCost)
758 return false;
759
760 Value *NewShuf;
761 // insertelt DestVec, (fneg (extractelt SrcVec, Index)), Index
762 Value *VecFNeg = Builder.CreateFNegFMF(SrcVec, FNeg);
763 if (NeedLenChg) {
764 // shuffle DestVec, (shuffle (fneg SrcVec), poison, SrcMask), Mask
765 Value *LenChgShuf = Builder.CreateShuffleVector(VecFNeg, SrcMask);
766 NewShuf = Builder.CreateShuffleVector(DestVec, LenChgShuf, Mask);
767 } else {
768 // shuffle DestVec, (fneg SrcVec), Mask
769 NewShuf = Builder.CreateShuffleVector(DestVec, VecFNeg, Mask);
770 }
771
772 replaceValue(I, *NewShuf);
773 return true;
774}
775
776/// Try to fold insert(binop(x,y),binop(a,b),idx)
777/// --> binop(insert(x,a,idx),insert(y,b,idx))
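/// A minimal example of the rewrite (names are illustrative):
///   %v = add <4 x i32> %x, %y
///   %s = add i32 %a, %b
///   %r = insertelement <4 x i32> %v, i32 %s, i32 2
/// -->
///   %x2 = insertelement <4 x i32> %x, i32 %a, i32 2
///   %y2 = insertelement <4 x i32> %y, i32 %b, i32 2
///   %r  = add <4 x i32> %x2, %y2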
778bool VectorCombine::foldInsExtBinop(Instruction &I) {
779 BinaryOperator *VecBinOp, *SclBinOp;
780 uint64_t Index;
781 if (!match(&I,
782 m_InsertElt(m_OneUse(m_BinOp(VecBinOp)),
783 m_OneUse(m_BinOp(SclBinOp)), m_ConstantInt(Index))))
784 return false;
785
786 // TODO: Add support for addlike etc.
787 Instruction::BinaryOps BinOpcode = VecBinOp->getOpcode();
788 if (BinOpcode != SclBinOp->getOpcode())
789 return false;
790
791 auto *ResultTy = dyn_cast<FixedVectorType>(I.getType());
792 if (!ResultTy)
793 return false;
794
795 // TODO: Attempt to detect m_ExtractElt for scalar operands and convert to
796 // shuffle?
797
798 InstructionCost OldCost = TTI.getInstructionCost(&I, CostKind) +
799 TTI.getInstructionCost(VecBinOp, CostKind) +
800 TTI.getInstructionCost(SclBinOp, CostKind);
801 InstructionCost NewCost =
802 TTI.getArithmeticInstrCost(BinOpcode, ResultTy, CostKind) +
803 TTI.getVectorInstrCost(Instruction::InsertElement, ResultTy, CostKind,
804 Index, VecBinOp->getOperand(0),
805 SclBinOp->getOperand(0)) +
806 TTI.getVectorInstrCost(Instruction::InsertElement, ResultTy, CostKind,
807 Index, VecBinOp->getOperand(1),
808 SclBinOp->getOperand(1));
809
810 LLVM_DEBUG(dbgs() << "Found an insertion of two binops: " << I
811 << "\n OldCost: " << OldCost << " vs NewCost: " << NewCost
812 << "\n");
813 if (NewCost > OldCost)
814 return false;
815
816 Value *NewIns0 = Builder.CreateInsertElement(VecBinOp->getOperand(0),
817 SclBinOp->getOperand(0), Index);
818 Value *NewIns1 = Builder.CreateInsertElement(VecBinOp->getOperand(1),
819 SclBinOp->getOperand(1), Index);
820 Value *NewBO = Builder.CreateBinOp(BinOpcode, NewIns0, NewIns1);
821
822 // Intersect flags from the old binops.
823 if (auto *NewInst = dyn_cast<Instruction>(NewBO)) {
824 NewInst->copyIRFlags(VecBinOp);
825 NewInst->andIRFlags(SclBinOp);
826 }
827
828 Worklist.pushValue(NewIns0);
829 Worklist.pushValue(NewIns1);
830 replaceValue(I, *NewBO);
831 return true;
832}
833
834/// Match: bitop(castop(x), castop(y)) -> castop(bitop(x, y))
835/// Supports: bitcast, trunc, sext, zext
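/// A minimal example of the rewrite (types and names are illustrative):
///   %xw = zext <4 x i8> %x to <4 x i16>
///   %yw = zext <4 x i8> %y to <4 x i16>
///   %r  = and <4 x i16> %xw, %yw
/// -->
///   %xy = and <4 x i8> %x, %y
///   %r  = zext <4 x i8> %xy to <4 x i16>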
836bool VectorCombine::foldBitOpOfCastops(Instruction &I) {
837 // Check if this is a bitwise logic operation
838 auto *BinOp = dyn_cast<BinaryOperator>(&I);
839 if (!BinOp || !BinOp->isBitwiseLogicOp())
840 return false;
841
842 // Get the cast instructions
843 auto *LHSCast = dyn_cast<CastInst>(BinOp->getOperand(0));
844 auto *RHSCast = dyn_cast<CastInst>(BinOp->getOperand(1));
845 if (!LHSCast || !RHSCast) {
846 LLVM_DEBUG(dbgs() << " One or both operands are not cast instructions\n");
847 return false;
848 }
849
850 // Both casts must be the same type
851 Instruction::CastOps CastOpcode = LHSCast->getOpcode();
852 if (CastOpcode != RHSCast->getOpcode())
853 return false;
854
855 // Only handle supported cast operations
856 switch (CastOpcode) {
857 case Instruction::BitCast:
858 case Instruction::Trunc:
859 case Instruction::SExt:
860 case Instruction::ZExt:
861 break;
862 default:
863 return false;
864 }
865
866 Value *LHSSrc = LHSCast->getOperand(0);
867 Value *RHSSrc = RHSCast->getOperand(0);
868
869 // Source types must match
870 if (LHSSrc->getType() != RHSSrc->getType())
871 return false;
872
873 auto *SrcTy = LHSSrc->getType();
874 auto *DstTy = I.getType();
875 // Bitcasts can handle scalar/vector mixes, such as i16 -> <16 x i1>.
876 // Other casts only handle vector types with integer elements.
877 if (CastOpcode != Instruction::BitCast &&
878 (!isa<FixedVectorType>(SrcTy) || !isa<FixedVectorType>(DstTy)))
879 return false;
880
881 // Only integer scalar/vector values are legal for bitwise logic operations.
882 if (!SrcTy->getScalarType()->isIntegerTy() ||
883 !DstTy->getScalarType()->isIntegerTy())
884 return false;
885
886 // Cost Check :
887 // OldCost = bitlogic + 2*casts
888 // NewCost = bitlogic + cast
889
890 // Calculate specific costs for each cast with instruction context
892 CastOpcode, DstTy, SrcTy, TTI::CastContextHint::None, CostKind, LHSCast);
894 CastOpcode, DstTy, SrcTy, TTI::CastContextHint::None, CostKind, RHSCast);
895
896 InstructionCost OldCost =
897 TTI.getArithmeticInstrCost(BinOp->getOpcode(), DstTy, CostKind) +
898 LHSCastCost + RHSCastCost;
899
900 // For new cost, we can't provide an instruction (it doesn't exist yet)
901 InstructionCost GenericCastCost = TTI.getCastInstrCost(
902 CastOpcode, DstTy, SrcTy, TTI::CastContextHint::None, CostKind);
903
904 InstructionCost NewCost =
905 TTI.getArithmeticInstrCost(BinOp->getOpcode(), SrcTy, CostKind) +
906 GenericCastCost;
907
908 // Account for multi-use casts using specific costs
909 if (!LHSCast->hasOneUse())
910 NewCost += LHSCastCost;
911 if (!RHSCast->hasOneUse())
912 NewCost += RHSCastCost;
913
914 LLVM_DEBUG(dbgs() << "foldBitOpOfCastops: OldCost=" << OldCost
915 << " NewCost=" << NewCost << "\n");
916
917 if (NewCost > OldCost)
918 return false;
919
920 // Create the operation on the source type
921 Value *NewOp = Builder.CreateBinOp(BinOp->getOpcode(), LHSSrc, RHSSrc,
922 BinOp->getName() + ".inner");
923 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NewOp))
924 NewBinOp->copyIRFlags(BinOp);
925
926 Worklist.pushValue(NewOp);
927
928 // Create the cast operation directly to ensure we get a new instruction
929 Instruction *NewCast = CastInst::Create(CastOpcode, NewOp, I.getType());
930
931 // Preserve cast instruction flags
932 NewCast->copyIRFlags(LHSCast);
933 NewCast->andIRFlags(RHSCast);
934
935 // Insert the new instruction
936 Value *Result = Builder.Insert(NewCast);
937
938 replaceValue(I, *Result);
939 return true;
940}
941
942/// Match:
943// bitop(castop(x), C) ->
944// bitop(castop(x), castop(InvC)) ->
945// castop(bitop(x, InvC))
946// Supports: bitcast, zext, sext, trunc
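// A minimal example of the rewrite (constants and types are illustrative),
// using zext, where InvC is chosen so that zext(InvC) == C:
//   %xw = zext <4 x i8> %x to <4 x i16>
//   %r  = and <4 x i16> %xw, <i16 15, i16 15, i16 15, i16 15>
// -->
//   %xm = and <4 x i8> %x, <i8 15, i8 15, i8 15, i8 15>
//   %r  = zext <4 x i8> %xm to <4 x i16>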
947bool VectorCombine::foldBitOpOfCastConstant(Instruction &I) {
948 Value *LHS;
949 Constant *C;
950
951 // Check if this is a bitwise logic operation
952 if (!match(&I, m_c_BitwiseLogic(m_Value(LHS), m_Constant(C))))
953 return false;
954
955 // Get the cast instructions
956 auto *LHSCast = dyn_cast<CastInst>(LHS);
957 if (!LHSCast)
958 return false;
959
960 Instruction::CastOps CastOpcode = LHSCast->getOpcode();
961
962 // Only handle supported cast operations
963 switch (CastOpcode) {
964 case Instruction::BitCast:
965 case Instruction::ZExt:
966 case Instruction::SExt:
967 case Instruction::Trunc:
968 break;
969 default:
970 return false;
971 }
972
973 Value *LHSSrc = LHSCast->getOperand(0);
974
975 auto *SrcTy = LHSSrc->getType();
976 auto *DstTy = I.getType();
977 // Bitcasts can handle scalar/vector mixes, such as i16 -> <16 x i1>.
978 // Other casts only handle vector types with integer elements.
979 if (CastOpcode != Instruction::BitCast &&
980 (!isa<FixedVectorType>(SrcTy) || !isa<FixedVectorType>(DstTy)))
981 return false;
982
983 // Only integer scalar/vector values are legal for bitwise logic operations.
984 if (!SrcTy->getScalarType()->isIntegerTy() ||
985 !DstTy->getScalarType()->isIntegerTy())
986 return false;
987
988 // Find the constant InvC, such that castop(InvC) equals to C.
989 PreservedCastFlags RHSFlags;
990 Constant *InvC = getLosslessInvCast(C, SrcTy, CastOpcode, *DL, &RHSFlags);
991 if (!InvC)
992 return false;
993
994 // Cost Check :
995 // OldCost = bitlogic + cast
996 // NewCost = bitlogic + cast
997
998 // Calculate specific costs for each cast with instruction context
999 InstructionCost LHSCastCost = TTI.getCastInstrCost(
1000 CastOpcode, DstTy, SrcTy, TTI::CastContextHint::None, CostKind, LHSCast);
1001
1002 InstructionCost OldCost =
1003 TTI.getArithmeticInstrCost(I.getOpcode(), DstTy, CostKind) + LHSCastCost;
1004
1005 // For new cost, we can't provide an instruction (it doesn't exist yet)
1006 InstructionCost GenericCastCost = TTI.getCastInstrCost(
1007 CastOpcode, DstTy, SrcTy, TTI::CastContextHint::None, CostKind);
1008
1009 InstructionCost NewCost =
1010 TTI.getArithmeticInstrCost(I.getOpcode(), SrcTy, CostKind) +
1011 GenericCastCost;
1012
1013 // Account for multi-use casts using specific costs
1014 if (!LHSCast->hasOneUse())
1015 NewCost += LHSCastCost;
1016
1017 LLVM_DEBUG(dbgs() << "foldBitOpOfCastConstant: OldCost=" << OldCost
1018 << " NewCost=" << NewCost << "\n");
1019
1020 if (NewCost > OldCost)
1021 return false;
1022
1023 // Create the operation on the source type
1024 Value *NewOp = Builder.CreateBinOp((Instruction::BinaryOps)I.getOpcode(),
1025 LHSSrc, InvC, I.getName() + ".inner");
1026 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NewOp))
1027 NewBinOp->copyIRFlags(&I);
1028
1029 Worklist.pushValue(NewOp);
1030
1031 // Create the cast operation directly to ensure we get a new instruction
1032 Instruction *NewCast = CastInst::Create(CastOpcode, NewOp, I.getType());
1033
1034 // Preserve cast instruction flags
1035 if (RHSFlags.NNeg)
1036 NewCast->setNonNeg();
1037 if (RHSFlags.NUW)
1038 NewCast->setHasNoUnsignedWrap();
1039 if (RHSFlags.NSW)
1040 NewCast->setHasNoSignedWrap();
1041
1042 NewCast->andIRFlags(LHSCast);
1043
1044 // Insert the new instruction
1045 Value *Result = Builder.Insert(NewCast);
1046
1047 replaceValue(I, *Result);
1048 return true;
1049}
1050
1051/// If this is a bitcast of a shuffle, try to bitcast the source vector to the
1052/// destination type followed by shuffle. This can enable further transforms by
1053/// moving bitcasts or shuffles together.
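/// A minimal example of the rewrite (types and mask are illustrative), with
/// equally sized elements so the mask carries over unchanged:
///   %s = shufflevector <4 x i32> %x, <4 x i32> poison, <2 x i32> <i32 0, i32 2>
///   %r = bitcast <2 x i32> %s to <2 x float>
/// -->
///   %c = bitcast <4 x i32> %x to <4 x float>
///   %r = shufflevector <4 x float> %c, <4 x float> poison, <2 x i32> <i32 0, i32 2>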
1054bool VectorCombine::foldBitcastShuffle(Instruction &I) {
1055 Value *V0, *V1;
1056 ArrayRef<int> Mask;
1057 if (!match(&I, m_BitCast(m_OneUse(
1058 m_Shuffle(m_Value(V0), m_Value(V1), m_Mask(Mask))))))
1059 return false;
1060
1061 // 1) Do not fold bitcast shuffle for scalable type. First, shuffle cost for
1062 // scalable type is unknown; Second, we cannot reason if the narrowed shuffle
1063 // mask for scalable type is a splat or not.
1064 // 2) Disallow non-vector casts.
1065 // TODO: We could allow any shuffle.
1066 auto *DestTy = dyn_cast<FixedVectorType>(I.getType());
1067 auto *SrcTy = dyn_cast<FixedVectorType>(V0->getType());
1068 if (!DestTy || !SrcTy)
1069 return false;
1070
1071 unsigned DestEltSize = DestTy->getScalarSizeInBits();
1072 unsigned SrcEltSize = SrcTy->getScalarSizeInBits();
1073 if (SrcTy->getPrimitiveSizeInBits() % DestEltSize != 0)
1074 return false;
1075
1076 bool IsUnary = isa<UndefValue>(V1);
1077
1078 // For binary shuffles, only fold bitcast(shuffle(X,Y))
1079 // if it won't increase the number of bitcasts.
1080 if (!IsUnary) {
1081 auto *BCTy0 = dyn_cast<FixedVectorType>(peekThroughBitcasts(V0)->getType());
1082 auto *BCTy1 = dyn_cast<FixedVectorType>(peekThroughBitcasts(V1)->getType());
1083 if (!(BCTy0 && BCTy0->getElementType() == DestTy->getElementType()) &&
1084 !(BCTy1 && BCTy1->getElementType() == DestTy->getElementType()))
1085 return false;
1086 }
1087
1088 SmallVector<int, 16> NewMask;
1089 if (DestEltSize <= SrcEltSize) {
1090 // The bitcast is from wide to narrow/equal elements. The shuffle mask can
1091 // always be expanded to the equivalent form choosing narrower elements.
1092 assert(SrcEltSize % DestEltSize == 0 && "Unexpected shuffle mask");
1093 unsigned ScaleFactor = SrcEltSize / DestEltSize;
1094 narrowShuffleMaskElts(ScaleFactor, Mask, NewMask);
1095 } else {
1096 // The bitcast is from narrow elements to wide elements. The shuffle mask
1097 // must choose consecutive elements to allow casting first.
1098 assert(DestEltSize % SrcEltSize == 0 && "Unexpected shuffle mask");
1099 unsigned ScaleFactor = DestEltSize / SrcEltSize;
1100 if (!widenShuffleMaskElts(ScaleFactor, Mask, NewMask))
1101 return false;
1102 }
1103
1104 // Bitcast the shuffle src - keep its original width but using the destination
1105 // scalar type.
1106 unsigned NumSrcElts = SrcTy->getPrimitiveSizeInBits() / DestEltSize;
1107 auto *NewShuffleTy =
1108 FixedVectorType::get(DestTy->getScalarType(), NumSrcElts);
1109 auto *OldShuffleTy =
1110 FixedVectorType::get(SrcTy->getScalarType(), Mask.size());
1111 unsigned NumOps = IsUnary ? 1 : 2;
1112
1113 // The new shuffle must not cost more than the old shuffle.
1114 TargetTransformInfo::ShuffleKind SK =
1115 IsUnary ? TargetTransformInfo::SK_PermuteSingleSrc
1116 : TargetTransformInfo::SK_PermuteTwoSrc;
1117
1118 InstructionCost NewCost =
1119 TTI.getShuffleCost(SK, DestTy, NewShuffleTy, NewMask, CostKind) +
1120 (NumOps * TTI.getCastInstrCost(Instruction::BitCast, NewShuffleTy, SrcTy,
1121 TargetTransformInfo::CastContextHint::None,
1122 CostKind));
1123 InstructionCost OldCost =
1124 TTI.getShuffleCost(SK, OldShuffleTy, SrcTy, Mask, CostKind) +
1125 TTI.getCastInstrCost(Instruction::BitCast, DestTy, OldShuffleTy,
1126 TargetTransformInfo::CastContextHint::None,
1127 CostKind);
1128
1129 LLVM_DEBUG(dbgs() << "Found a bitcasted shuffle: " << I << "\n OldCost: "
1130 << OldCost << " vs NewCost: " << NewCost << "\n");
1131
1132 if (NewCost > OldCost || !NewCost.isValid())
1133 return false;
1134
1135 // bitcast (shuf V0, V1, MaskC) --> shuf (bitcast V0), (bitcast V1), MaskC'
1136 ++NumShufOfBitcast;
1137 Value *CastV0 = Builder.CreateBitCast(peekThroughBitcasts(V0), NewShuffleTy);
1138 Value *CastV1 = Builder.CreateBitCast(peekThroughBitcasts(V1), NewShuffleTy);
1139 Value *Shuf = Builder.CreateShuffleVector(CastV0, CastV1, NewMask);
1140 replaceValue(I, *Shuf);
1141 return true;
1142}
1143
1144/// VP Intrinsics whose vector operands are both splat values may be simplified
1145/// into the scalar version of the operation and the result splatted. This
1146/// can lead to scalarization down the line.
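/// A minimal example of the rewrite, assuming an all-true mask (and, for ops
/// that are not speculatable, an EVL known to be non-zero); names are
/// illustrative, with %xsplat and %ysplat being splats of %x and %y:
///   %r = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %xsplat, <4 x i32> %ysplat,
///                                          <4 x i1> %alltrue, i32 %evl)
/// -->
///   %s = add i32 %x, %y
///   %r = <splat of %s built with insertelement + shufflevector>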
1147bool VectorCombine::scalarizeVPIntrinsic(Instruction &I) {
1148 if (!isa<VPIntrinsic>(I))
1149 return false;
1150 VPIntrinsic &VPI = cast<VPIntrinsic>(I);
1151 Value *Op0 = VPI.getArgOperand(0);
1152 Value *Op1 = VPI.getArgOperand(1);
1153
1154 if (!isSplatValue(Op0) || !isSplatValue(Op1))
1155 return false;
1156
1157 // Check getSplatValue early in this function, to avoid doing unnecessary
1158 // work.
1159 Value *ScalarOp0 = getSplatValue(Op0);
1160 Value *ScalarOp1 = getSplatValue(Op1);
1161 if (!ScalarOp0 || !ScalarOp1)
1162 return false;
1163
1164 // For the binary VP intrinsics supported here, the result on disabled lanes
1165 // is a poison value. For now, only do this simplification if all lanes
1166 // are active.
1167 // TODO: Relax the condition that all lanes are active by using insertelement
1168 // on inactive lanes.
1169 auto IsAllTrueMask = [](Value *MaskVal) {
1170 if (Value *SplattedVal = getSplatValue(MaskVal))
1171 if (auto *ConstValue = dyn_cast<Constant>(SplattedVal))
1172 return ConstValue->isAllOnesValue();
1173 return false;
1174 };
1175 if (!IsAllTrueMask(VPI.getArgOperand(2)))
1176 return false;
1177
1178 // Check to make sure we support scalarization of the intrinsic
1179 Intrinsic::ID IntrID = VPI.getIntrinsicID();
1180 if (!VPBinOpIntrinsic::isVPBinOp(IntrID))
1181 return false;
1182
1183 // Calculate cost of splatting both operands into vectors and the vector
1184 // intrinsic
1185 VectorType *VecTy = cast<VectorType>(VPI.getType());
1186 SmallVector<int> Mask;
1187 if (auto *FVTy = dyn_cast<FixedVectorType>(VecTy))
1188 Mask.resize(FVTy->getNumElements(), 0);
1189 InstructionCost SplatCost =
1190 TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, CostKind, 0) +
1191 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, VecTy, Mask,
1192 CostKind);
1193
1194 // Calculate the cost of the VP Intrinsic
1195 SmallVector<Type *> Args;
1196 for (Value *V : VPI.args())
1197 Args.push_back(V->getType());
1198 IntrinsicCostAttributes Attrs(IntrID, VecTy, Args);
1199 InstructionCost VectorOpCost = TTI.getIntrinsicInstrCost(Attrs, CostKind);
1200 InstructionCost OldCost = 2 * SplatCost + VectorOpCost;
1201
1202 // Determine scalar opcode
1203 std::optional<unsigned> FunctionalOpcode =
1204 VPI.getFunctionalOpcode();
1205 std::optional<Intrinsic::ID> ScalarIntrID = std::nullopt;
1206 if (!FunctionalOpcode) {
1207 ScalarIntrID = VPI.getFunctionalIntrinsicID();
1208 if (!ScalarIntrID)
1209 return false;
1210 }
1211
1212 // Calculate cost of scalarizing
1213 InstructionCost ScalarOpCost = 0;
1214 if (ScalarIntrID) {
1215 IntrinsicCostAttributes Attrs(*ScalarIntrID, VecTy->getScalarType(), Args);
1216 ScalarOpCost = TTI.getIntrinsicInstrCost(Attrs, CostKind);
1217 } else {
1218 ScalarOpCost = TTI.getArithmeticInstrCost(*FunctionalOpcode,
1219 VecTy->getScalarType(), CostKind);
1220 }
1221
1222 // The existing splats may be kept around if other instructions use them.
1223 InstructionCost CostToKeepSplats =
1224 (SplatCost * !Op0->hasOneUse()) + (SplatCost * !Op1->hasOneUse());
1225 InstructionCost NewCost = ScalarOpCost + SplatCost + CostToKeepSplats;
1226
1227 LLVM_DEBUG(dbgs() << "Found a VP Intrinsic to scalarize: " << VPI
1228 << "\n");
1229 LLVM_DEBUG(dbgs() << "Cost of Intrinsic: " << OldCost
1230 << ", Cost of scalarizing:" << NewCost << "\n");
1231
1232 // We want to scalarize unless the vector variant actually has lower cost.
1233 if (OldCost < NewCost || !NewCost.isValid())
1234 return false;
1235
1236 // Scalarize the intrinsic
1237 ElementCount EC = cast<VectorType>(Op0->getType())->getElementCount();
1238 Value *EVL = VPI.getArgOperand(3);
1239
1240 // If the VP op might introduce UB or poison, we can scalarize it provided
1241 // that we know the EVL > 0: If the EVL is zero, then the original VP op
1242 // becomes a no-op and thus won't be UB, so make sure we don't introduce UB by
1243 // scalarizing it.
1244 bool SafeToSpeculate;
1245 if (ScalarIntrID)
1246 SafeToSpeculate = Intrinsic::getFnAttributes(I.getContext(), *ScalarIntrID)
1247 .hasAttribute(Attribute::AttrKind::Speculatable);
1248 else
1249 SafeToSpeculate = isSafeToSpeculativelyExecuteWithOpcode(
1250 *FunctionalOpcode, &VPI, nullptr, &AC, &DT);
1251 if (!SafeToSpeculate &&
1252 !isKnownNonZero(EVL, SimplifyQuery(*DL, &DT, &AC, &VPI)))
1253 return false;
1254
1255 Value *ScalarVal =
1256 ScalarIntrID
1257 ? Builder.CreateIntrinsic(VecTy->getScalarType(), *ScalarIntrID,
1258 {ScalarOp0, ScalarOp1})
1259 : Builder.CreateBinOp((Instruction::BinaryOps)(*FunctionalOpcode),
1260 ScalarOp0, ScalarOp1);
1261
1262 replaceValue(VPI, *Builder.CreateVectorSplat(EC, ScalarVal));
1263 return true;
1264}
1265
1266/// Match a vector op/compare/intrinsic with at least one
1267/// inserted scalar operand and convert to scalar op/cmp/intrinsic followed
1268/// by insertelement.
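/// A minimal worked example (constants and names are illustrative):
///   %v = insertelement <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i32 %x, i32 2
///   %r = add <4 x i32> %v, <i32 10, i32 10, i32 10, i32 10>
/// -->
///   %s = add i32 %x, 10
///   %r = insertelement <4 x i32> <i32 11, i32 12, i32 13, i32 14>, i32 %s, i32 2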
1269bool VectorCombine::scalarizeOpOrCmp(Instruction &I) {
1270 auto *UO = dyn_cast<UnaryOperator>(&I);
1271 auto *BO = dyn_cast<BinaryOperator>(&I);
1272 auto *CI = dyn_cast<CmpInst>(&I);
1273 auto *II = dyn_cast<IntrinsicInst>(&I);
1274 if (!UO && !BO && !CI && !II)
1275 return false;
1276
1277 // TODO: Allow intrinsics with different argument types
1278 if (II) {
1279 if (!isTriviallyVectorizable(II->getIntrinsicID()))
1280 return false;
1281 for (auto [Idx, Arg] : enumerate(II->args()))
1282 if (Arg->getType() != II->getType() &&
1283 !isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Idx, &TTI))
1284 return false;
1285 }
1286
1287 // Do not convert the vector condition of a vector select into a scalar
1288 // condition. That may cause problems for codegen because of differences in
1289 // boolean formats and register-file transfers.
1290 // TODO: Can we account for that in the cost model?
1291 if (CI)
1292 for (User *U : I.users())
1293 if (match(U, m_Select(m_Specific(&I), m_Value(), m_Value())))
1294 return false;
1295
1296 // Match constant vectors or scalars being inserted into constant vectors:
1297 // vec_op [VecC0 | (inselt VecC0, V0, Index)], ...
1298 SmallVector<Value *> VecCs, ScalarOps;
1299 std::optional<uint64_t> Index;
1300
1301 auto Ops = II ? II->args() : I.operands();
1302 for (auto [OpNum, Op] : enumerate(Ops)) {
1303 Constant *VecC;
1304 Value *V;
1305 uint64_t InsIdx = 0;
1306 if (match(Op.get(), m_InsertElt(m_Constant(VecC), m_Value(V),
1307 m_ConstantInt(InsIdx)))) {
1308 // Bail if any inserts are out of bounds.
1309 VectorType *OpTy = cast<VectorType>(Op->getType());
1310 if (OpTy->getElementCount().getKnownMinValue() <= InsIdx)
1311 return false;
1312 // All inserts must have the same index.
1313 // TODO: Deal with mismatched index constants and variable indexes?
1314 if (!Index)
1315 Index = InsIdx;
1316 else if (InsIdx != *Index)
1317 return false;
1318 VecCs.push_back(VecC);
1319 ScalarOps.push_back(V);
1320 } else if (II && isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(),
1321 OpNum, &TTI)) {
1322 VecCs.push_back(Op.get());
1323 ScalarOps.push_back(Op.get());
1324 } else if (match(Op.get(), m_Constant(VecC))) {
1325 VecCs.push_back(VecC);
1326 ScalarOps.push_back(nullptr);
1327 } else {
1328 return false;
1329 }
1330 }
1331
1332 // Bail if all operands are constant.
1333 if (!Index.has_value())
1334 return false;
1335
1336 VectorType *VecTy = cast<VectorType>(I.getType());
1337 Type *ScalarTy = VecTy->getScalarType();
1338 assert(VecTy->isVectorTy() &&
1339 (ScalarTy->isIntegerTy() || ScalarTy->isFloatingPointTy() ||
1340 ScalarTy->isPointerTy()) &&
1341 "Unexpected types for insert element into binop or cmp");
1342
1343 unsigned Opcode = I.getOpcode();
1344 InstructionCost ScalarOpCost, VectorOpCost;
1345 if (CI) {
1346 CmpInst::Predicate Pred = CI->getPredicate();
1347 ScalarOpCost = TTI.getCmpSelInstrCost(
1348 Opcode, ScalarTy, CmpInst::makeCmpResultType(ScalarTy), Pred, CostKind);
1349 VectorOpCost = TTI.getCmpSelInstrCost(
1350 Opcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred, CostKind);
1351 } else if (UO || BO) {
1352 ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy, CostKind);
1353 VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy, CostKind);
1354 } else {
1355 IntrinsicCostAttributes ScalarICA(
1356 II->getIntrinsicID(), ScalarTy,
1357 SmallVector<Type *>(II->arg_size(), ScalarTy));
1358 ScalarOpCost = TTI.getIntrinsicInstrCost(ScalarICA, CostKind);
1359 IntrinsicCostAttributes VectorICA(
1360 II->getIntrinsicID(), VecTy,
1361 SmallVector<Type *>(II->arg_size(), VecTy));
1362 VectorOpCost = TTI.getIntrinsicInstrCost(VectorICA, CostKind);
1363 }
1364
1365 // Fold the vector constants in the original vectors into a new base vector to
1366 // get more accurate cost modelling.
1367 Value *NewVecC = nullptr;
1368 if (CI)
1369 NewVecC = simplifyCmpInst(CI->getPredicate(), VecCs[0], VecCs[1], SQ);
1370 else if (UO)
1371 NewVecC =
1372 simplifyUnOp(UO->getOpcode(), VecCs[0], UO->getFastMathFlags(), SQ);
1373 else if (BO)
1374 NewVecC = simplifyBinOp(BO->getOpcode(), VecCs[0], VecCs[1], SQ);
1375 else if (II)
1376 NewVecC = simplifyCall(II, II->getCalledOperand(), VecCs, SQ);
1377
1378 if (!NewVecC)
1379 return false;
1380
1381 // Get cost estimate for the insert element. This cost will factor into
1382 // both sequences.
1383 InstructionCost OldCost = VectorOpCost;
1384 InstructionCost NewCost =
1385 ScalarOpCost + TTI.getVectorInstrCost(Instruction::InsertElement, VecTy,
1386 CostKind, *Index, NewVecC);
1387
1388 for (auto [Idx, Op, VecC, Scalar] : enumerate(Ops, VecCs, ScalarOps)) {
1389 if (!Scalar || (II && isVectorIntrinsicWithScalarOpAtArg(
1390 II->getIntrinsicID(), Idx, &TTI)))
1391 continue;
1392 InstructionCost InsertCost = TTI.getVectorInstrCost(
1393 Instruction::InsertElement, VecTy, CostKind, *Index, VecC, Scalar);
1394 OldCost += InsertCost;
1395 NewCost += !Op->hasOneUse() * InsertCost;
1396 }
1397
1398 // We want to scalarize unless the vector variant actually has lower cost.
1399 if (OldCost < NewCost || !NewCost.isValid())
1400 return false;
1401
1402 // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index) -->
1403 // inselt NewVecC, (scalar_op V0, V1), Index
1404 if (CI)
1405 ++NumScalarCmp;
1406 else if (UO || BO)
1407 ++NumScalarOps;
1408 else
1409 ++NumScalarIntrinsic;
1410
1411 // For constant cases, extract the scalar element, this should constant fold.
1412 for (auto [OpIdx, Scalar, VecC] : enumerate(ScalarOps, VecCs))
1413 if (!Scalar)
1414 ScalarOps[OpIdx] = Builder.CreateExtractElement(
1415 cast<Constant>(VecC), Builder.getInt64(*Index));
1416
1417 Value *Scalar;
1418 if (CI)
1419 Scalar = Builder.CreateCmp(CI->getPredicate(), ScalarOps[0], ScalarOps[1]);
1420 else if (UO || BO)
1421 Scalar = Builder.CreateNAryOp(Opcode, ScalarOps);
1422 else
1423 Scalar = Builder.CreateIntrinsic(ScalarTy, II->getIntrinsicID(), ScalarOps);
1424
1425 Scalar->setName(I.getName() + ".scalar");
1426
1427 // All IR flags are safe to back-propagate. There is no potential for extra
1428 // poison to be created by the scalar instruction.
1429 if (auto *ScalarInst = dyn_cast<Instruction>(Scalar))
1430 ScalarInst->copyIRFlags(&I);
1431
1432 Value *Insert = Builder.CreateInsertElement(NewVecC, Scalar, *Index);
1433 replaceValue(I, *Insert);
1434 return true;
1435}
1436
1437/// Try to combine a scalar binop + 2 scalar compares of extracted elements of
1438/// a vector into vector operations followed by extract. Note: The SLP pass
1439/// may miss this pattern because of implementation problems.
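/// A minimal example of the rewrite (constants and names are illustrative),
/// assuming the lane-1 extract is the one chosen for conversion to a shuffle:
///   %e0 = extractelement <4 x i32> %x, i32 0
///   %e1 = extractelement <4 x i32> %x, i32 1
///   %c0 = icmp sgt i32 %e0, 10
///   %c1 = icmp sgt i32 %e1, 20
///   %r  = and i1 %c0, %c1
/// -->
///   %vcmp = icmp sgt <4 x i32> %x, <i32 10, i32 20, i32 poison, i32 poison>
///   %shuf = shufflevector <4 x i1> %vcmp, <4 x i1> poison,
///                         <4 x i32> <i32 1, i32 poison, i32 poison, i32 poison>
///   %and  = and <4 x i1> %vcmp, %shuf
///   %r    = extractelement <4 x i1> %and, i32 0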
1440bool VectorCombine::foldExtractedCmps(Instruction &I) {
1441 auto *BI = dyn_cast<BinaryOperator>(&I);
1442
1443 // We are looking for a scalar binop of booleans.
1444 // binop i1 (cmp Pred I0, C0), (cmp Pred I1, C1)
1445 if (!BI || !I.getType()->isIntegerTy(1))
1446 return false;
1447
1448 // The compare predicates should match, and each compare should have a
1449 // constant operand.
1450 Value *B0 = I.getOperand(0), *B1 = I.getOperand(1);
1451 Instruction *I0, *I1;
1452 Constant *C0, *C1;
1453 CmpPredicate P0, P1;
1454 if (!match(B0, m_Cmp(P0, m_Instruction(I0), m_Constant(C0))) ||
1455 !match(B1, m_Cmp(P1, m_Instruction(I1), m_Constant(C1))))
1456 return false;
1457
1458 auto MatchingPred = CmpPredicate::getMatching(P0, P1);
1459 if (!MatchingPred)
1460 return false;
1461
1462 // The compare operands must be extracts of the same vector with constant
1463 // extract indexes.
1464 Value *X;
1465 uint64_t Index0, Index1;
1466 if (!match(I0, m_ExtractElt(m_Value(X), m_ConstantInt(Index0))) ||
1467 !match(I1, m_ExtractElt(m_Specific(X), m_ConstantInt(Index1))))
1468 return false;
1469
1470 auto *Ext0 = cast<ExtractElementInst>(I0);
1471 auto *Ext1 = cast<ExtractElementInst>(I1);
1472 ExtractElementInst *ConvertToShuf = getShuffleExtract(Ext0, Ext1, CostKind);
1473 if (!ConvertToShuf)
1474 return false;
1475 assert((ConvertToShuf == Ext0 || ConvertToShuf == Ext1) &&
1476 "Unknown ExtractElementInst");
1477
1478 // The original scalar pattern is:
1479 // binop i1 (cmp Pred (ext X, Index0), C0), (cmp Pred (ext X, Index1), C1)
1480 CmpInst::Predicate Pred = *MatchingPred;
1481 unsigned CmpOpcode =
1482 CmpInst::isFPPredicate(Pred) ? Instruction::FCmp : Instruction::ICmp;
1483 auto *VecTy = dyn_cast<FixedVectorType>(X->getType());
1484 if (!VecTy)
1485 return false;
1486
1487 InstructionCost Ext0Cost =
1488 TTI.getVectorInstrCost(*Ext0, VecTy, CostKind, Index0);
1489 InstructionCost Ext1Cost =
1490 TTI.getVectorInstrCost(*Ext1, VecTy, CostKind, Index1);
1491 InstructionCost CmpCost = TTI.getCmpSelInstrCost(
1492 CmpOpcode, I0->getType(), CmpInst::makeCmpResultType(I0->getType()), Pred,
1493 CostKind);
1494
1495 InstructionCost OldCost =
1496 Ext0Cost + Ext1Cost + CmpCost * 2 +
1497 TTI.getArithmeticInstrCost(I.getOpcode(), I.getType(), CostKind);
1498
1499 // The proposed vector pattern is:
1500 // vcmp = cmp Pred X, VecC
1501 // ext (binop vNi1 vcmp, (shuffle vcmp, Index1)), Index0
1502 int CheapIndex = ConvertToShuf == Ext0 ? Index1 : Index0;
1503 int ExpensiveIndex = ConvertToShuf == Ext0 ? Index0 : Index1;
1504 auto *CmpTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(X->getType()));
1505 InstructionCost NewCost = TTI.getCmpSelInstrCost(
1506 CmpOpcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred, CostKind);
1507 SmallVector<int, 32> ShufMask(VecTy->getNumElements(), PoisonMaskElem);
1508 ShufMask[CheapIndex] = ExpensiveIndex;
1509 NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, CmpTy,
1510 CmpTy, ShufMask, CostKind);
1511 NewCost += TTI.getArithmeticInstrCost(I.getOpcode(), CmpTy, CostKind);
1512 NewCost += TTI.getVectorInstrCost(*Ext0, CmpTy, CostKind, CheapIndex);
1513 NewCost += Ext0->hasOneUse() ? 0 : Ext0Cost;
1514 NewCost += Ext1->hasOneUse() ? 0 : Ext1Cost;
1515
1516 // Aggressively form vector ops if the cost is equal because the transform
1517 // may enable further optimization.
1518 // Codegen can reverse this transform (scalarize) if it was not profitable.
1519 if (OldCost < NewCost || !NewCost.isValid())
1520 return false;
1521
1522 // Create a vector constant from the 2 scalar constants.
1523 SmallVector<Constant *, 32> CmpC(VecTy->getNumElements(),
1524 PoisonValue::get(VecTy->getElementType()));
1525 CmpC[Index0] = C0;
1526 CmpC[Index1] = C1;
1527 Value *VCmp = Builder.CreateCmp(Pred, X, ConstantVector::get(CmpC));
1528 Value *Shuf = createShiftShuffle(VCmp, ExpensiveIndex, CheapIndex, Builder);
1529 Value *LHS = ConvertToShuf == Ext0 ? Shuf : VCmp;
1530 Value *RHS = ConvertToShuf == Ext0 ? VCmp : Shuf;
1531 Value *VecLogic = Builder.CreateBinOp(BI->getOpcode(), LHS, RHS);
1532 Value *NewExt = Builder.CreateExtractElement(VecLogic, CheapIndex);
1533 replaceValue(I, *NewExt);
1534 ++NumVecCmpBO;
1535 return true;
1536}
1537
1538static void analyzeCostOfVecReduction(const IntrinsicInst &II,
1539 TTI::TargetCostKind CostKind,
1540 const TargetTransformInfo &TTI,
1541 InstructionCost &CostBeforeReduction,
1542 InstructionCost &CostAfterReduction) {
1543 Instruction *Op0, *Op1;
1544 auto *RedOp = dyn_cast<Instruction>(II.getOperand(0));
1545 auto *VecRedTy = cast<VectorType>(II.getOperand(0)->getType());
1546 unsigned ReductionOpc =
1547 getArithmeticReductionInstruction(II.getIntrinsicID());
1548 if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value()))) {
1549 bool IsUnsigned = isa<ZExtInst>(RedOp);
1550 auto *ExtType = cast<VectorType>(RedOp->getOperand(0)->getType());
1551
1552 CostBeforeReduction =
1553 TTI.getCastInstrCost(RedOp->getOpcode(), VecRedTy, ExtType,
1554 TTI::CastContextHint::None, CostKind);
1555 CostAfterReduction =
1556 TTI.getExtendedReductionCost(ReductionOpc, IsUnsigned, II.getType(),
1557 ExtType, FastMathFlags(), CostKind);
1558 return;
1559 }
1560 if (RedOp && II.getIntrinsicID() == Intrinsic::vector_reduce_add &&
1561 match(RedOp,
1562 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
1563 match(Op0, m_ZExtOrSExt(m_Value())) &&
1564 Op0->getOpcode() == Op1->getOpcode() &&
1565 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
1566 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
1567 // Matched reduce.add(ext(mul(ext(A), ext(B))))
1568 bool IsUnsigned = isa<ZExtInst>(Op0);
1569 auto *ExtType = cast<VectorType>(Op0->getOperand(0)->getType());
1570 VectorType *MulType = VectorType::get(Op0->getType(), VecRedTy);
1571
1572 InstructionCost ExtCost =
1573 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
1574 TTI::CastContextHint::None, CostKind);
1575 InstructionCost MulCost =
1576 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
1577 InstructionCost Ext2Cost =
1578 TTI.getCastInstrCost(RedOp->getOpcode(), VecRedTy, MulType,
1579 TTI::CastContextHint::None, CostKind);
1580
1581 CostBeforeReduction = ExtCost * 2 + MulCost + Ext2Cost;
1582 CostAfterReduction = TTI.getMulAccReductionCost(
1583 IsUnsigned, ReductionOpc, II.getType(), ExtType, CostKind);
1584 return;
1585 }
1586 CostAfterReduction = TTI.getArithmeticReductionCost(ReductionOpc, VecRedTy,
1587 std::nullopt, CostKind);
1588}
1589
1590bool VectorCombine::foldBinopOfReductions(Instruction &I) {
1591 Instruction::BinaryOps BinOpOpc = cast<BinaryOperator>(&I)->getOpcode();
1592 Intrinsic::ID ReductionIID = getReductionForBinop(BinOpOpc);
1593 if (BinOpOpc == Instruction::Sub)
1594 ReductionIID = Intrinsic::vector_reduce_add;
1595 if (ReductionIID == Intrinsic::not_intrinsic)
1596 return false;
1597
1598 auto checkIntrinsicAndGetItsArgument = [](Value *V,
1599 Intrinsic::ID IID) -> Value * {
1600 auto *II = dyn_cast<IntrinsicInst>(V);
1601 if (!II)
1602 return nullptr;
1603 if (II->getIntrinsicID() == IID && II->hasOneUse())
1604 return II->getArgOperand(0);
1605 return nullptr;
1606 };
1607
1608 Value *V0 = checkIntrinsicAndGetItsArgument(I.getOperand(0), ReductionIID);
1609 if (!V0)
1610 return false;
1611 Value *V1 = checkIntrinsicAndGetItsArgument(I.getOperand(1), ReductionIID);
1612 if (!V1)
1613 return false;
1614
1615 auto *VTy = cast<VectorType>(V0->getType());
1616 if (V1->getType() != VTy)
1617 return false;
1618 const auto &II0 = *cast<IntrinsicInst>(I.getOperand(0));
1619 const auto &II1 = *cast<IntrinsicInst>(I.getOperand(1));
1620 unsigned ReductionOpc =
1621 getArithmeticReductionInstruction(II0.getIntrinsicID());
1622
1623 InstructionCost OldCost = 0;
1624 InstructionCost NewCost = 0;
1625 InstructionCost CostOfRedOperand0 = 0;
1626 InstructionCost CostOfRed0 = 0;
1627 InstructionCost CostOfRedOperand1 = 0;
1628 InstructionCost CostOfRed1 = 0;
1629 analyzeCostOfVecReduction(II0, CostKind, TTI, CostOfRedOperand0, CostOfRed0);
1630 analyzeCostOfVecReduction(II1, CostKind, TTI, CostOfRedOperand1, CostOfRed1);
1631 OldCost = CostOfRed0 + CostOfRed1 + TTI.getInstructionCost(&I, CostKind);
1632 NewCost =
1633 CostOfRedOperand0 + CostOfRedOperand1 +
1634 TTI.getArithmeticInstrCost(BinOpOpc, VTy, CostKind) +
1635 TTI.getArithmeticReductionCost(ReductionOpc, VTy, std::nullopt, CostKind);
1636 if (NewCost >= OldCost || !NewCost.isValid())
1637 return false;
1638
1639 LLVM_DEBUG(dbgs() << "Found two mergeable reductions: " << I
1640 << "\n OldCost: " << OldCost << " vs NewCost: " << NewCost
1641 << "\n");
1642 Value *VectorBO;
1643 if (BinOpOpc == Instruction::Or)
1644 VectorBO = Builder.CreateOr(V0, V1, "",
1645 cast<PossiblyDisjointInst>(I).isDisjoint());
1646 else
1647 VectorBO = Builder.CreateBinOp(BinOpOpc, V0, V1);
1648
1649 Instruction *Rdx = Builder.CreateIntrinsic(ReductionIID, {VTy}, {VectorBO});
1650 replaceValue(I, *Rdx);
1651 return true;
1652}
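// Editor's note: a minimal IR sketch of the fold above (names invented):
//   %r0 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a)
//   %r1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %b)
//   %s  = add i32 %r0, %r1
// -->
//   %v  = add <4 x i32> %a, %b
//   %s  = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v)
// For a sub of two reduce.add results, the same idea applies with a vector sub
// feeding a single reduce.add.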
1653
1654// Check if the memory location is modified between two instructions in the same BB.
1655static bool isMemModifiedBetween(BasicBlock::iterator Begin,
1656 BasicBlock::iterator End,
1657 const MemoryLocation &Loc, AAResults &AA) {
1658 unsigned NumScanned = 0;
1659 return std::any_of(Begin, End, [&](const Instruction &Instr) {
1660 return isModSet(AA.getModRefInfo(&Instr, Loc)) ||
1661 ++NumScanned > MaxInstrsToScan;
1662 });
1663}
1664
1665namespace {
1666/// Helper class to indicate whether a vector index can be safely scalarized and
1667/// if a freeze needs to be inserted.
1668class ScalarizationResult {
1669 enum class StatusTy { Unsafe, Safe, SafeWithFreeze };
1670
1671 StatusTy Status;
1672 Value *ToFreeze;
1673
1674 ScalarizationResult(StatusTy Status, Value *ToFreeze = nullptr)
1675 : Status(Status), ToFreeze(ToFreeze) {}
1676
1677public:
1678 ScalarizationResult(const ScalarizationResult &Other) = default;
1679 ~ScalarizationResult() {
1680 assert(!ToFreeze && "freeze() not called with ToFreeze being set");
1681 }
1682
1683 static ScalarizationResult unsafe() { return {StatusTy::Unsafe}; }
1684 static ScalarizationResult safe() { return {StatusTy::Safe}; }
1685 static ScalarizationResult safeWithFreeze(Value *ToFreeze) {
1686 return {StatusTy::SafeWithFreeze, ToFreeze};
1687 }
1688
1689 /// Returns true if the index can be scalarized without requiring a freeze.
1690 bool isSafe() const { return Status == StatusTy::Safe; }
1691 /// Returns true if the index cannot be scalarized.
1692 bool isUnsafe() const { return Status == StatusTy::Unsafe; }
1693 /// Returns true if the index can be scalarized, but requires inserting a
1694 /// freeze.
1695 bool isSafeWithFreeze() const { return Status == StatusTy::SafeWithFreeze; }
1696
1697 /// Reset the state to Unsafe and clear ToFreeze if set.
1698 void discard() {
1699 ToFreeze = nullptr;
1700 Status = StatusTy::Unsafe;
1701 }
1702
1703 /// Freeze ToFreeze and update the use in \p UserI to use the frozen value.
1704 void freeze(IRBuilderBase &Builder, Instruction &UserI) {
1705 assert(isSafeWithFreeze() &&
1706 "should only be used when freezing is required");
1707 assert(is_contained(ToFreeze->users(), &UserI) &&
1708 "UserI must be a user of ToFreeze");
1709 IRBuilder<>::InsertPointGuard Guard(Builder);
1710 Builder.SetInsertPoint(cast<Instruction>(&UserI));
1711 Value *Frozen =
1712 Builder.CreateFreeze(ToFreeze, ToFreeze->getName() + ".frozen");
1713 for (Use &U : make_early_inc_range((UserI.operands())))
1714 if (U.get() == ToFreeze)
1715 U.set(Frozen);
1716
1717 ToFreeze = nullptr;
1718 }
1719};
1720} // namespace
1721
1722/// Check if it is legal to scalarize a memory access to \p VecTy at index \p
1723/// Idx. \p Idx must access a valid vector element.
1724static ScalarizationResult canScalarizeAccess(VectorType *VecTy, Value *Idx,
1725 Instruction *CtxI,
1726 AssumptionCache &AC,
1727 const DominatorTree &DT) {
1728 // We do checks for both fixed vector types and scalable vector types.
1729 // This is the number of elements of fixed vector types,
1730 // or the minimum number of elements of scalable vector types.
1731 uint64_t NumElements = VecTy->getElementCount().getKnownMinValue();
1732 unsigned IntWidth = Idx->getType()->getScalarSizeInBits();
1733
1734 if (auto *C = dyn_cast<ConstantInt>(Idx)) {
1735 if (C->getValue().ult(NumElements))
1736 return ScalarizationResult::safe();
1737 return ScalarizationResult::unsafe();
1738 }
1739
1740 // Always unsafe if the index type can't handle all inbound values.
1741 if (!llvm::isUIntN(IntWidth, NumElements))
1742 return ScalarizationResult::unsafe();
1743
1744 APInt Zero(IntWidth, 0);
1745 APInt MaxElts(IntWidth, NumElements);
1746 ConstantRange ValidIndices(Zero, MaxElts);
1747 ConstantRange IdxRange(IntWidth, true);
1748
1749 if (isGuaranteedNotToBePoison(Idx, &AC)) {
1750 if (ValidIndices.contains(computeConstantRange(Idx, /* ForSigned */ false,
1751 true, &AC, CtxI, &DT)))
1752 return ScalarizationResult::safe();
1753 return ScalarizationResult::unsafe();
1754 }
1755
1756 // If the index may be poison, check if we can insert a freeze before the
1757 // range of the index is restricted.
1758 Value *IdxBase;
1759 ConstantInt *CI;
1760 if (match(Idx, m_And(m_Value(IdxBase), m_ConstantInt(CI)))) {
1761 IdxRange = IdxRange.binaryAnd(CI->getValue());
1762 } else if (match(Idx, m_URem(m_Value(IdxBase), m_ConstantInt(CI)))) {
1763 IdxRange = IdxRange.urem(CI->getValue());
1764 }
1765
1766 if (ValidIndices.contains(IdxRange))
1767 return ScalarizationResult::safeWithFreeze(IdxBase);
1768 return ScalarizationResult::unsafe();
1769}
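// Editor's note (illustrative): for a <4 x i32> vector, a constant index 0-3 is
// Safe and an index with no known bound is Unsafe, while something like
//   %idx = and i64 %i, 3
// is SafeWithFreeze: the masked range [0,3] is in bounds, but %i must be frozen
// first so that a poison index cannot be observed.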
1770
1771/// The memory operation on a vector of \p ScalarType had alignment of
1772/// \p VectorAlignment. Compute the maximal, but conservatively correct,
1773/// alignment that will be valid for the memory operation on a single scalar
1774/// element of the same type with index \p Idx.
1775static Align computeAlignmentAfterScalarization(Align VectorAlignment,
1776 Type *ScalarType, Value *Idx,
1777 const DataLayout &DL) {
1778 if (auto *C = dyn_cast<ConstantInt>(Idx))
1779 return commonAlignment(VectorAlignment,
1780 C->getZExtValue() * DL.getTypeStoreSize(ScalarType));
1781 return commonAlignment(VectorAlignment, DL.getTypeStoreSize(ScalarType));
1782}
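// Editor's worked example (assumed values, not from the source): scalarizing a
// <4 x i32> access that had align 16 at constant index 1 touches byte offset
// 1 * 4 == 4, so the scalar access gets commonAlignment(16, 4) == 4; with a
// non-constant index the conservative result is also commonAlignment(16, 4),
// based only on the 4-byte element store size.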
1783
1784// Combine patterns like:
1785// %0 = load <4 x i32>, <4 x i32>* %a
1786// %1 = insertelement <4 x i32> %0, i32 %b, i32 1
1787// store <4 x i32> %1, <4 x i32>* %a
1788// to:
1789// %0 = bitcast <4 x i32>* %a to i32*
1790// %1 = getelementptr inbounds i32, i32* %0, i64 0, i64 1
1791// store i32 %b, i32* %1
1792bool VectorCombine::foldSingleElementStore(Instruction &I) {
1793 if (!TTI.allowVectorElementIndexingUsingGEP())
1794 return false;
1795 auto *SI = cast<StoreInst>(&I);
1796 if (!SI->isSimple() || !isa<VectorType>(SI->getValueOperand()->getType()))
1797 return false;
1798
1799 // TODO: Combine more complicated patterns (multiple insert) by referencing
1800 // TargetTransformInfo.
1801 Instruction *Source;
1802 Value *NewElement;
1803 Value *Idx;
1804 if (!match(SI->getValueOperand(),
1805 m_InsertElt(m_Instruction(Source), m_Value(NewElement),
1806 m_Value(Idx))))
1807 return false;
1808
1809 if (auto *Load = dyn_cast<LoadInst>(Source)) {
1810 auto VecTy = cast<VectorType>(SI->getValueOperand()->getType());
1811 Value *SrcAddr = Load->getPointerOperand()->stripPointerCasts();
1812 // Don't optimize for atomic/volatile load or store. Ensure memory is not
1813 // modified in between, the vector type matches the store size, and the index is inbounds.
1814 if (!Load->isSimple() || Load->getParent() != SI->getParent() ||
1815 !DL->typeSizeEqualsStoreSize(Load->getType()->getScalarType()) ||
1816 SrcAddr != SI->getPointerOperand()->stripPointerCasts())
1817 return false;
1818
1819 auto ScalarizableIdx = canScalarizeAccess(VecTy, Idx, Load, AC, DT);
1820 if (ScalarizableIdx.isUnsafe() ||
1821 isMemModifiedBetween(Load->getIterator(), SI->getIterator(),
1822 MemoryLocation::get(SI), AA))
1823 return false;
1824
1825 // Ensure we add the load back to the worklist BEFORE its users so they can be
1826 // erased in the correct order.
1827 Worklist.push(Load);
1828
1829 if (ScalarizableIdx.isSafeWithFreeze())
1830 ScalarizableIdx.freeze(Builder, *cast<Instruction>(Idx));
1831 Value *GEP = Builder.CreateInBoundsGEP(
1832 SI->getValueOperand()->getType(), SI->getPointerOperand(),
1833 {ConstantInt::get(Idx->getType(), 0), Idx});
1834 StoreInst *NSI = Builder.CreateStore(NewElement, GEP);
1835 NSI->copyMetadata(*SI);
1836 Align ScalarOpAlignment = computeAlignmentAfterScalarization(
1837 std::max(SI->getAlign(), Load->getAlign()), NewElement->getType(), Idx,
1838 *DL);
1839 NSI->setAlignment(ScalarOpAlignment);
1840 replaceValue(I, *NSI);
1841 eraseInstruction(I);
1842 return true;
1843 }
1844
1845 return false;
1846}
1847
1848/// Try to scalarize vector loads feeding extractelement instructions.
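// Editor's note: an illustrative rewrite (pointer and index invented):
//   %v  = load <4 x i32>, ptr %p, align 16
//   %e1 = extractelement <4 x i32> %v, i64 1
// -->
//   %g1 = getelementptr inbounds <4 x i32>, ptr %p, i32 0, i64 1
//   %e1 = load i32, ptr %g1, align 4
// assuming the cost model prefers the scalar loads and the index is known to be
// in bounds.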
1849bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
1850 if (!TTI.allowVectorElementIndexingUsingGEP())
1851 return false;
1852
1853 Value *Ptr;
1854 if (!match(&I, m_Load(m_Value(Ptr))))
1855 return false;
1856
1857 auto *LI = cast<LoadInst>(&I);
1858 auto *VecTy = cast<VectorType>(LI->getType());
1859 if (LI->isVolatile() || !DL->typeSizeEqualsStoreSize(VecTy->getScalarType()))
1860 return false;
1861
1862 InstructionCost OriginalCost =
1863 TTI.getMemoryOpCost(Instruction::Load, VecTy, LI->getAlign(),
1864 LI->getPointerAddressSpace(), CostKind);
1865 InstructionCost ScalarizedCost = 0;
1866
1867 Instruction *LastCheckedInst = LI;
1868 unsigned NumInstChecked = 0;
1869 DenseMap<ExtractElementInst *, ScalarizationResult> NeedFreeze;
1870 auto FailureGuard = make_scope_exit([&]() {
1871 // If the transform is aborted, discard the ScalarizationResults.
1872 for (auto &Pair : NeedFreeze)
1873 Pair.second.discard();
1874 });
1875
1876 // Check if all users of the load are extracts with no memory modifications
1877 // between the load and the extract. Compute the cost of both the original
1878 // code and the scalarized version.
1879 for (User *U : LI->users()) {
1880 auto *UI = dyn_cast<ExtractElementInst>(U);
1881 if (!UI || UI->getParent() != LI->getParent())
1882 return false;
1883
1884 // If any extract is waiting to be erased, then bail out as this will
1885 // distort the cost calculation and possibly lead to infinite loops.
1886 if (UI->use_empty())
1887 return false;
1888
1889 // Check if any instruction between the load and the extract may modify
1890 // memory.
1891 if (LastCheckedInst->comesBefore(UI)) {
1892 for (Instruction &I :
1893 make_range(std::next(LI->getIterator()), UI->getIterator())) {
1894 // Bail out if we reached the check limit or the instruction may write
1895 // to memory.
1896 if (NumInstChecked == MaxInstrsToScan || I.mayWriteToMemory())
1897 return false;
1898 NumInstChecked++;
1899 }
1900 LastCheckedInst = UI;
1901 }
1902
1903 auto ScalarIdx =
1904 canScalarizeAccess(VecTy, UI->getIndexOperand(), LI, AC, DT);
1905 if (ScalarIdx.isUnsafe())
1906 return false;
1907 if (ScalarIdx.isSafeWithFreeze()) {
1908 NeedFreeze.try_emplace(UI, ScalarIdx);
1909 ScalarIdx.discard();
1910 }
1911
1912 auto *Index = dyn_cast<ConstantInt>(UI->getIndexOperand());
1913 OriginalCost +=
1914 TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, CostKind,
1915 Index ? Index->getZExtValue() : -1);
1916 ScalarizedCost +=
1917 TTI.getMemoryOpCost(Instruction::Load, VecTy->getElementType(),
1918 Align(1), LI->getPointerAddressSpace(), CostKind);
1919 ScalarizedCost += TTI.getAddressComputationCost(LI->getPointerOperandType(),
1920 nullptr, nullptr, CostKind);
1921 }
1922
1923 LLVM_DEBUG(dbgs() << "Found all extractions of a vector load: " << I
1924 << "\n LoadExtractCost: " << OriginalCost
1925 << " vs ScalarizedCost: " << ScalarizedCost << "\n");
1926
1927 if (ScalarizedCost >= OriginalCost)
1928 return false;
1929
1930 // Ensure we add the load back to the worklist BEFORE its users so they can be
1931 // erased in the correct order.
1932 Worklist.push(LI);
1933
1934 Type *ElemType = VecTy->getElementType();
1935
1936 // Replace extracts with narrow scalar loads.
1937 for (User *U : LI->users()) {
1938 auto *EI = cast<ExtractElementInst>(U);
1939 Value *Idx = EI->getIndexOperand();
1940
1941 // Insert 'freeze' for poison indexes.
1942 auto It = NeedFreeze.find(EI);
1943 if (It != NeedFreeze.end())
1944 It->second.freeze(Builder, *cast<Instruction>(Idx));
1945
1946 Builder.SetInsertPoint(EI);
1947 Value *GEP =
1948 Builder.CreateInBoundsGEP(VecTy, Ptr, {Builder.getInt32(0), Idx});
1949 auto *NewLoad = cast<LoadInst>(
1950 Builder.CreateLoad(ElemType, GEP, EI->getName() + ".scalar"));
1951
1952 Align ScalarOpAlignment =
1953 computeAlignmentAfterScalarization(LI->getAlign(), ElemType, Idx, *DL);
1954 NewLoad->setAlignment(ScalarOpAlignment);
1955
1956 if (auto *ConstIdx = dyn_cast<ConstantInt>(Idx)) {
1957 size_t Offset = ConstIdx->getZExtValue() * DL->getTypeStoreSize(ElemType);
1958 AAMDNodes OldAAMD = LI->getAAMetadata();
1959 NewLoad->setAAMetadata(OldAAMD.adjustForAccess(Offset, ElemType, *DL));
1960 }
1961
1962 replaceValue(*EI, *NewLoad, false);
1963 }
1964
1965 FailureGuard.release();
1966 return true;
1967}
1968
1969bool VectorCombine::scalarizeExtExtract(Instruction &I) {
1970 if (!TTI.allowVectorElementIndexingUsingGEP())
1971 return false;
1972 auto *Ext = dyn_cast<ZExtInst>(&I);
1973 if (!Ext)
1974 return false;
1975
1976 // Try to convert a vector zext feeding only extracts to a set of scalar
1977 //   (Src >> (ExtIdx * EltBitWidth)) & ((1 << EltBitWidth) - 1)
1978 // operations, if profitable.
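// Editor's illustrative sketch (little-endian, names invented): with the bit
// widths matching as checked below,
//   %z  = zext <4 x i8> %src to <4 x i32>
//   %e2 = extractelement <4 x i32> %z, i64 2
// becomes
//   %s  = bitcast <4 x i8> %src to i32
//   %sh = lshr i32 %s, 16
//   %e2 = and i32 %sh, 255
// (with a freeze of %src first if it is not known to be non-poison).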
1979 auto *SrcTy = dyn_cast<FixedVectorType>(Ext->getOperand(0)->getType());
1980 if (!SrcTy)
1981 return false;
1982 auto *DstTy = cast<FixedVectorType>(Ext->getType());
1983
1984 Type *ScalarDstTy = DstTy->getElementType();
1985 if (DL->getTypeSizeInBits(SrcTy) != DL->getTypeSizeInBits(ScalarDstTy))
1986 return false;
1987
1988 InstructionCost VectorCost =
1989 TTI.getCastInstrCost(Instruction::ZExt, DstTy, SrcTy,
1990 TTI::CastContextHint::None, CostKind);
1991 unsigned ExtCnt = 0;
1992 bool ExtLane0 = false;
1993 for (User *U : Ext->users()) {
1994 uint64_t Idx;
1995 if (!match(U, m_ExtractElt(m_Value(), m_ConstantInt(Idx))))
1996 return false;
1997 if (cast<Instruction>(U)->use_empty())
1998 continue;
1999 ExtCnt += 1;
2000 ExtLane0 |= !Idx;
2001 VectorCost += TTI.getVectorInstrCost(Instruction::ExtractElement, DstTy,
2002 CostKind, Idx, U);
2003 }
2004
2005 InstructionCost ScalarCost =
2006 ExtCnt * TTI.getArithmeticInstrCost(
2007 Instruction::And, ScalarDstTy, CostKind,
2008 {TTI::OK_AnyValue, TTI::OP_None},
2009 {TTI::OK_NonUniformConstantValue, TTI::OP_None}) +
2010 (ExtCnt - ExtLane0) *
2011 TTI.getArithmeticInstrCost(
2012 Instruction::LShr, ScalarDstTy, CostKind,
2013 {TTI::OK_AnyValue, TTI::OP_None},
2014 {TTI::OK_NonUniformConstantValue, TTI::OP_None});
2015 if (ScalarCost > VectorCost)
2016 return false;
2017
2018 Value *ScalarV = Ext->getOperand(0);
2019 if (!isGuaranteedNotToBePoison(ScalarV, &AC, dyn_cast<Instruction>(ScalarV),
2020 &DT))
2021 ScalarV = Builder.CreateFreeze(ScalarV);
2022 ScalarV = Builder.CreateBitCast(
2023 ScalarV,
2024 IntegerType::get(SrcTy->getContext(), DL->getTypeSizeInBits(SrcTy)));
2025 uint64_t SrcEltSizeInBits = DL->getTypeSizeInBits(SrcTy->getElementType());
2026 uint64_t EltBitMask = (1ull << SrcEltSizeInBits) - 1;
2027 uint64_t TotalBits = DL->getTypeSizeInBits(SrcTy);
2028 Type *PackedTy = IntegerType::get(SrcTy->getContext(), TotalBits);
2029 Value *Mask = ConstantInt::get(PackedTy, EltBitMask);
2030 for (User *U : Ext->users()) {
2031 auto *Extract = cast<ExtractElementInst>(U);
2032 uint64_t Idx =
2033 cast<ConstantInt>(Extract->getIndexOperand())->getZExtValue();
2034 uint64_t ShiftAmt =
2035 DL->isBigEndian()
2036 ? (TotalBits - SrcEltSizeInBits - Idx * SrcEltSizeInBits)
2037 : (Idx * SrcEltSizeInBits);
2038 Value *LShr = Builder.CreateLShr(ScalarV, ShiftAmt);
2039 Value *And = Builder.CreateAnd(LShr, Mask);
2040 U->replaceAllUsesWith(And);
2041 }
2042 return true;
2043}
2044
2045/// Try to fold "(or (zext (bitcast X)), (shl (zext (bitcast Y)), C))"
2046/// to "(bitcast (concat X, Y))"
2047/// where X/Y are bitcasted from i1 mask vectors.
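// Editor's note: a concrete shape of the pattern (illustrative only):
//   %bx = bitcast <8 x i1> %m0 to i8
//   %by = bitcast <8 x i1> %m1 to i8
//   %zx = zext i8 %bx to i16
//   %zy = zext i8 %by to i16
//   %sh = shl i16 %zy, 8
//   %r  = or disjoint i16 %zx, %sh
// -->
//   %c  = shufflevector <8 x i1> %m0, <8 x i1> %m1,
//                       <16 x i32> <i32 0, i32 1, ..., i32 15>
//   %r  = bitcast <16 x i1> %c to i16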
2048bool VectorCombine::foldConcatOfBoolMasks(Instruction &I) {
2049 Type *Ty = I.getType();
2050 if (!Ty->isIntegerTy())
2051 return false;
2052
2053 // TODO: Add big endian test coverage
2054 if (DL->isBigEndian())
2055 return false;
2056
2057 // Restrict to disjoint cases so the mask vectors aren't overlapping.
2058 Instruction *X, *Y;
2059 if (!match(&I, m_DisjointOr(m_Instruction(X), m_Instruction(Y))))
2060 return false;
2061
2062 // Allow both sources to contain shl, to handle more generic pattern:
2063 // "(or (shl (zext (bitcast X)), C1), (shl (zext (bitcast Y)), C2))"
2064 Value *SrcX;
2065 uint64_t ShAmtX = 0;
2066 if (!match(X, m_OneUse(m_ZExt(m_OneUse(m_BitCast(m_Value(SrcX)))))) &&
2067 !match(X, m_OneUse(
2068 m_Shl(m_OneUse(m_ZExt(m_OneUse(m_BitCast(m_Value(SrcX))))),
2069 m_ConstantInt(ShAmtX)))))
2070 return false;
2071
2072 Value *SrcY;
2073 uint64_t ShAmtY = 0;
2074 if (!match(Y, m_OneUse(m_ZExt(m_OneUse(m_BitCast(m_Value(SrcY)))))) &&
2075 !match(Y, m_OneUse(
2076 m_Shl(m_OneUse(m_ZExt(m_OneUse(m_BitCast(m_Value(SrcY))))),
2077 m_ConstantInt(ShAmtY)))))
2078 return false;
2079
2080 // Canonicalize larger shift to the RHS.
2081 if (ShAmtX > ShAmtY) {
2082 std::swap(X, Y);
2083 std::swap(SrcX, SrcY);
2084 std::swap(ShAmtX, ShAmtY);
2085 }
2086
2087 // Ensure both sources are matching vXi1 bool mask types, and that the shift
2088 // difference is the mask width so they can be easily concatenated together.
2089 uint64_t ShAmtDiff = ShAmtY - ShAmtX;
2090 unsigned NumSHL = (ShAmtX > 0) + (ShAmtY > 0);
2091 unsigned BitWidth = Ty->getPrimitiveSizeInBits();
2092 auto *MaskTy = dyn_cast<FixedVectorType>(SrcX->getType());
2093 if (!MaskTy || SrcX->getType() != SrcY->getType() ||
2094 !MaskTy->getElementType()->isIntegerTy(1) ||
2095 MaskTy->getNumElements() != ShAmtDiff ||
2096 MaskTy->getNumElements() > (BitWidth / 2))
2097 return false;
2098
2099 auto *ConcatTy = FixedVectorType::getDoubleElementsVectorType(MaskTy);
2100 auto *ConcatIntTy =
2101 Type::getIntNTy(Ty->getContext(), ConcatTy->getNumElements());
2102 auto *MaskIntTy = Type::getIntNTy(Ty->getContext(), ShAmtDiff);
2103
2104 SmallVector<int, 32> ConcatMask(ConcatTy->getNumElements());
2105 std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
2106
2107 // TODO: Is it worth supporting multi use cases?
2108 InstructionCost OldCost = 0;
2109 OldCost += TTI.getArithmeticInstrCost(Instruction::Or, Ty, CostKind);
2110 OldCost +=
2111 NumSHL * TTI.getArithmeticInstrCost(Instruction::Shl, Ty, CostKind);
2112 OldCost += 2 * TTI.getCastInstrCost(Instruction::ZExt, Ty, MaskIntTy,
2113 TTI::CastContextHint::None, CostKind);
2114 OldCost += 2 * TTI.getCastInstrCost(Instruction::BitCast, MaskIntTy, MaskTy,
2115 TTI::CastContextHint::None, CostKind);
2116
2117 InstructionCost NewCost = 0;
2118 NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, ConcatTy,
2119 MaskTy, ConcatMask, CostKind);
2120 NewCost += TTI.getCastInstrCost(Instruction::BitCast, ConcatIntTy, ConcatTy,
2121 TTI::CastContextHint::None, CostKind);
2122 if (Ty != ConcatIntTy)
2123 NewCost += TTI.getCastInstrCost(Instruction::ZExt, Ty, ConcatIntTy,
2124 TTI::CastContextHint::None, CostKind);
2125 if (ShAmtX > 0)
2126 NewCost += TTI.getArithmeticInstrCost(Instruction::Shl, Ty, CostKind);
2127
2128 LLVM_DEBUG(dbgs() << "Found a concatenation of bitcasted bool masks: " << I
2129 << "\n OldCost: " << OldCost << " vs NewCost: " << NewCost
2130 << "\n");
2131
2132 if (NewCost > OldCost)
2133 return false;
2134
2135 // Build bool mask concatenation, bitcast back to scalar integer, and perform
2136 // any residual zero-extension or shifting.
2137 Value *Concat = Builder.CreateShuffleVector(SrcX, SrcY, ConcatMask);
2138 Worklist.pushValue(Concat);
2139
2140 Value *Result = Builder.CreateBitCast(Concat, ConcatIntTy);
2141
2142 if (Ty != ConcatIntTy) {
2143 Worklist.pushValue(Result);
2144 Result = Builder.CreateZExt(Result, Ty);
2145 }
2146
2147 if (ShAmtX > 0) {
2148 Worklist.pushValue(Result);
2149 Result = Builder.CreateShl(Result, ShAmtX);
2150 }
2151
2152 replaceValue(I, *Result);
2153 return true;
2154}
2155
2156/// Try to convert "shuffle (binop (shuffle, shuffle)), undef"
2157/// --> "binop (shuffle), (shuffle)".
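// Editor's illustration (sketch, masks invented): when the outer permute and
// the one-use inner shuffles compose to identities,
//   %s0 = shufflevector <4 x i32> %a, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//   %s1 = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//   %v  = add <4 x i32> %s0, %s1
//   %r  = shufflevector <4 x i32> %v, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// folds to a plain "%r = add <4 x i32> %a, %b".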
2158bool VectorCombine::foldPermuteOfBinops(Instruction &I) {
2159 BinaryOperator *BinOp;
2160 ArrayRef<int> OuterMask;
2161 if (!match(&I,
2162 m_Shuffle(m_OneUse(m_BinOp(BinOp)), m_Undef(), m_Mask(OuterMask))))
2163 return false;
2164
2165 // Don't introduce poison into div/rem.
2166 if (BinOp->isIntDivRem() && llvm::is_contained(OuterMask, PoisonMaskElem))
2167 return false;
2168
2169 Value *Op00, *Op01, *Op10, *Op11;
2170 ArrayRef<int> Mask0, Mask1;
2171 bool Match0 =
2172 match(BinOp->getOperand(0),
2173 m_OneUse(m_Shuffle(m_Value(Op00), m_Value(Op01), m_Mask(Mask0))));
2174 bool Match1 =
2175 match(BinOp->getOperand(1),
2176 m_OneUse(m_Shuffle(m_Value(Op10), m_Value(Op11), m_Mask(Mask1))));
2177 if (!Match0 && !Match1)
2178 return false;
2179
2180 Op00 = Match0 ? Op00 : BinOp->getOperand(0);
2181 Op01 = Match0 ? Op01 : BinOp->getOperand(0);
2182 Op10 = Match1 ? Op10 : BinOp->getOperand(1);
2183 Op11 = Match1 ? Op11 : BinOp->getOperand(1);
2184
2185 Instruction::BinaryOps Opcode = BinOp->getOpcode();
2186 auto *ShuffleDstTy = dyn_cast<FixedVectorType>(I.getType());
2187 auto *BinOpTy = dyn_cast<FixedVectorType>(BinOp->getType());
2188 auto *Op0Ty = dyn_cast<FixedVectorType>(Op00->getType());
2189 auto *Op1Ty = dyn_cast<FixedVectorType>(Op10->getType());
2190 if (!ShuffleDstTy || !BinOpTy || !Op0Ty || !Op1Ty)
2191 return false;
2192
2193 unsigned NumSrcElts = BinOpTy->getNumElements();
2194
2195 // Don't accept shuffles that reference the second operand in
2196 // div/rem or if it's an undef arg.
2197 if ((BinOp->isIntDivRem() || !isa<PoisonValue>(I.getOperand(1))) &&
2198 any_of(OuterMask, [NumSrcElts](int M) { return M >= (int)NumSrcElts; }))
2199 return false;
2200
2201 // Merge outer / inner (or identity if no match) shuffles.
2202 SmallVector<int> NewMask0, NewMask1;
2203 for (int M : OuterMask) {
2204 if (M < 0 || M >= (int)NumSrcElts) {
2205 NewMask0.push_back(PoisonMaskElem);
2206 NewMask1.push_back(PoisonMaskElem);
2207 } else {
2208 NewMask0.push_back(Match0 ? Mask0[M] : M);
2209 NewMask1.push_back(Match1 ? Mask1[M] : M);
2210 }
2211 }
2212
2213 unsigned NumOpElts = Op0Ty->getNumElements();
2214 bool IsIdentity0 = ShuffleDstTy == Op0Ty &&
2215 all_of(NewMask0, [NumOpElts](int M) { return M < (int)NumOpElts; }) &&
2216 ShuffleVectorInst::isIdentityMask(NewMask0, NumOpElts);
2217 bool IsIdentity1 = ShuffleDstTy == Op1Ty &&
2218 all_of(NewMask1, [NumOpElts](int M) { return M < (int)NumOpElts; }) &&
2219 ShuffleVectorInst::isIdentityMask(NewMask1, NumOpElts);
2220
2221 // Try to merge shuffles across the binop if the new shuffles are not costly.
2222 InstructionCost OldCost =
2223 TTI.getArithmeticInstrCost(Opcode, BinOpTy, CostKind) +
2224 TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, ShuffleDstTy,
2225 BinOpTy, OuterMask, CostKind, 0, nullptr, {BinOp}, &I);
2226 if (Match0)
2227 OldCost += TTI.getShuffleCost(
2228 TargetTransformInfo::SK_PermuteTwoSrc, BinOpTy, Op0Ty, Mask0, CostKind,
2229 0, nullptr, {Op00, Op01}, cast<Instruction>(BinOp->getOperand(0)));
2230 if (Match1)
2231 OldCost += TTI.getShuffleCost(
2232 TargetTransformInfo::SK_PermuteTwoSrc, BinOpTy, Op1Ty, Mask1, CostKind,
2233 0, nullptr, {Op10, Op11}, cast<Instruction>(BinOp->getOperand(1)));
2234
2235 InstructionCost NewCost =
2236 TTI.getArithmeticInstrCost(Opcode, ShuffleDstTy, CostKind);
2237
2238 if (!IsIdentity0)
2239 NewCost +=
2240 TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, ShuffleDstTy,
2241 Op0Ty, NewMask0, CostKind, 0, nullptr, {Op00, Op01});
2242 if (!IsIdentity1)
2243 NewCost +=
2244 TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, ShuffleDstTy,
2245 Op1Ty, NewMask1, CostKind, 0, nullptr, {Op10, Op11});
2246
2247 LLVM_DEBUG(dbgs() << "Found a shuffle feeding a shuffled binop: " << I
2248 << "\n OldCost: " << OldCost << " vs NewCost: " << NewCost
2249 << "\n");
2250
2251 // If costs are equal, still fold as we reduce instruction count.
2252 if (NewCost > OldCost)
2253 return false;
2254
2255 Value *LHS =
2256 IsIdentity0 ? Op00 : Builder.CreateShuffleVector(Op00, Op01, NewMask0);
2257 Value *RHS =
2258 IsIdentity1 ? Op10 : Builder.CreateShuffleVector(Op10, Op11, NewMask1);
2259 Value *NewBO = Builder.CreateBinOp(Opcode, LHS, RHS);
2260
2261 // Intersect flags from the old binops.
2262 if (auto *NewInst = dyn_cast<Instruction>(NewBO))
2263 NewInst->copyIRFlags(BinOp);
2264
2265 Worklist.pushValue(LHS);
2266 Worklist.pushValue(RHS);
2267 replaceValue(I, *NewBO);
2268 return true;
2269}
2270
2271/// Try to convert "shuffle (binop), (binop)" into "binop (shuffle), (shuffle)".
2272/// Try to convert "shuffle (cmpop), (cmpop)" into "cmpop (shuffle), (shuffle)".
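// Editor's illustration (sketch, mask invented):
//   %b0 = add <4 x i32> %x, %y
//   %b1 = add <4 x i32> %z, %w
//   %r  = shufflevector <4 x i32> %b0, <4 x i32> %b1,
//                       <4 x i32> <i32 0, i32 4, i32 1, i32 5>
// -->
//   %s0 = shufflevector <4 x i32> %x, <4 x i32> %z,
//                       <4 x i32> <i32 0, i32 4, i32 1, i32 5>
//   %s1 = shufflevector <4 x i32> %y, <4 x i32> %w,
//                       <4 x i32> <i32 0, i32 4, i32 1, i32 5>
//   %r  = add <4 x i32> %s0, %s1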
2273bool VectorCombine::foldShuffleOfBinops(Instruction &I) {
2274 ArrayRef<int> OldMask;
2275 Instruction *LHS, *RHS;
2276 if (!match(&I, m_Shuffle(m_OneUse(m_Instruction(LHS)),
2277 m_OneUse(m_Instruction(RHS)), m_Mask(OldMask))))
2278 return false;
2279
2280 // TODO: Add support for addlike etc.
2281 if (LHS->getOpcode() != RHS->getOpcode())
2282 return false;
2283
2284 Value *X, *Y, *Z, *W;
2285 bool IsCommutative = false;
2286 CmpPredicate PredLHS = CmpInst::BAD_ICMP_PREDICATE;
2287 CmpPredicate PredRHS = CmpInst::BAD_ICMP_PREDICATE;
2288 if (match(LHS, m_BinOp(m_Value(X), m_Value(Y))) &&
2289 match(RHS, m_BinOp(m_Value(Z), m_Value(W)))) {
2290 auto *BO = cast<BinaryOperator>(LHS);
2291 // Don't introduce poison into div/rem.
2292 if (llvm::is_contained(OldMask, PoisonMaskElem) && BO->isIntDivRem())
2293 return false;
2294 IsCommutative = BinaryOperator::isCommutative(BO->getOpcode());
2295 } else if (match(LHS, m_Cmp(PredLHS, m_Value(X), m_Value(Y))) &&
2296 match(RHS, m_Cmp(PredRHS, m_Value(Z), m_Value(W))) &&
2297 (CmpInst::Predicate)PredLHS == (CmpInst::Predicate)PredRHS) {
2298 IsCommutative = cast<CmpInst>(LHS)->isCommutative();
2299 } else
2300 return false;
2301
2302 auto *ShuffleDstTy = dyn_cast<FixedVectorType>(I.getType());
2303 auto *BinResTy = dyn_cast<FixedVectorType>(LHS->getType());
2304 auto *BinOpTy = dyn_cast<FixedVectorType>(X->getType());
2305 if (!ShuffleDstTy || !BinResTy || !BinOpTy || X->getType() != Z->getType())
2306 return false;
2307
2308 unsigned NumSrcElts = BinOpTy->getNumElements();
2309
2310 // If we have something like "add X, Y" and "add Z, X", swap ops to match.
2311 if (IsCommutative && X != Z && Y != W && (X == W || Y == Z))
2312 std::swap(X, Y);
2313
2314 auto ConvertToUnary = [NumSrcElts](int &M) {
2315 if (M >= (int)NumSrcElts)
2316 M -= NumSrcElts;
2317 };
2318
2319 SmallVector<int> NewMask0(OldMask);
2320 TargetTransformInfo::ShuffleKind SK0 = TargetTransformInfo::SK_PermuteTwoSrc;
2321 if (X == Z) {
2322 llvm::for_each(NewMask0, ConvertToUnary);
2323 SK0 = TargetTransformInfo::SK_PermuteSingleSrc;
2324 Z = PoisonValue::get(BinOpTy);
2325 }
2326
2327 SmallVector<int> NewMask1(OldMask);
2328 TargetTransformInfo::ShuffleKind SK1 = TargetTransformInfo::SK_PermuteTwoSrc;
2329 if (Y == W) {
2330 llvm::for_each(NewMask1, ConvertToUnary);
2331 SK1 = TargetTransformInfo::SK_PermuteSingleSrc;
2332 W = PoisonValue::get(BinOpTy);
2333 }
2334
2335 // Try to replace a binop with a shuffle if the shuffle is not costly.
2336 InstructionCost OldCost =
2337 TTI.getInstructionCost(LHS, CostKind) +
2338 TTI.getInstructionCost(RHS, CostKind) +
2339 TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, ShuffleDstTy,
2340 BinResTy, OldMask, CostKind, 0, nullptr, {LHS, RHS},
2341 &I);
2342
2343 // Handle shuffle(binop(shuffle(x),y),binop(z,shuffle(w))) style patterns
2344 // where one use shuffles have gotten split across the binop/cmp. These
2345 // often allow a major reduction in total cost that wouldn't happen as
2346 // individual folds.
2347 auto MergeInner = [&](Value *&Op, int Offset, MutableArrayRef<int> Mask,
2348 TTI::TargetCostKind CostKind) -> bool {
2349 Value *InnerOp;
2350 ArrayRef<int> InnerMask;
2351 if (match(Op, m_OneUse(m_Shuffle(m_Value(InnerOp), m_Undef(),
2352 m_Mask(InnerMask)))) &&
2353 InnerOp->getType() == Op->getType() &&
2354 all_of(InnerMask,
2355 [NumSrcElts](int M) { return M < (int)NumSrcElts; })) {
2356 for (int &M : Mask)
2357 if (Offset <= M && M < (int)(Offset + NumSrcElts)) {
2358 M = InnerMask[M - Offset];
2359 M = 0 <= M ? M + Offset : M;
2360 }
2361 OldCost += TTI.getInstructionCost(cast<Instruction>(Op), CostKind);
2362 Op = InnerOp;
2363 return true;
2364 }
2365 return false;
2366 };
2367 bool ReducedInstCount = false;
2368 ReducedInstCount |= MergeInner(X, 0, NewMask0, CostKind);
2369 ReducedInstCount |= MergeInner(Y, 0, NewMask1, CostKind);
2370 ReducedInstCount |= MergeInner(Z, NumSrcElts, NewMask0, CostKind);
2371 ReducedInstCount |= MergeInner(W, NumSrcElts, NewMask1, CostKind);
2372
2373 auto *ShuffleCmpTy =
2374 FixedVectorType::get(BinOpTy->getElementType(), ShuffleDstTy);
2375 InstructionCost NewCost =
2376 TTI.getShuffleCost(SK0, ShuffleCmpTy, BinOpTy, NewMask0, CostKind, 0,
2377 nullptr, {X, Z}) +
2378 TTI.getShuffleCost(SK1, ShuffleCmpTy, BinOpTy, NewMask1, CostKind, 0,
2379 nullptr, {Y, W});
2380
2381 if (PredLHS == CmpInst::BAD_ICMP_PREDICATE) {
2382 NewCost +=
2383 TTI.getArithmeticInstrCost(LHS->getOpcode(), ShuffleDstTy, CostKind);
2384 } else {
2385 NewCost += TTI.getCmpSelInstrCost(LHS->getOpcode(), ShuffleCmpTy,
2386 ShuffleDstTy, PredLHS, CostKind);
2387 }
2388
2389 LLVM_DEBUG(dbgs() << "Found a shuffle feeding two binops: " << I
2390 << "\n OldCost: " << OldCost << " vs NewCost: " << NewCost
2391 << "\n");
2392
2393 // If either shuffle will constant fold away, then fold for the same cost as
2394 // we will reduce the instruction count.
2395 ReducedInstCount |= (isa<Constant>(X) && isa<Constant>(Z)) ||
2396 (isa<Constant>(Y) && isa<Constant>(W));
2397 if (ReducedInstCount ? (NewCost > OldCost) : (NewCost >= OldCost))
2398 return false;
2399
2400 Value *Shuf0 = Builder.CreateShuffleVector(X, Z, NewMask0);
2401 Value *Shuf1 = Builder.CreateShuffleVector(Y, W, NewMask1);
2402 Value *NewBO = PredLHS == CmpInst::BAD_ICMP_PREDICATE
2403 ? Builder.CreateBinOp(
2404 cast<BinaryOperator>(LHS)->getOpcode(), Shuf0, Shuf1)
2405 : Builder.CreateCmp(PredLHS, Shuf0, Shuf1);
2406
2407 // Intersect flags from the old binops.
2408 if (auto *NewInst = dyn_cast<Instruction>(NewBO)) {
2409 NewInst->copyIRFlags(LHS);
2410 NewInst->andIRFlags(RHS);
2411 }
2412
2413 Worklist.pushValue(Shuf0);
2414 Worklist.pushValue(Shuf1);
2415 replaceValue(I, *NewBO);
2416 return true;
2417}
2418
2419/// Try to convert,
2420/// (shuffle(select(c1,t1,f1)), (select(c2,t2,f2)), m) into
2421/// (select (shuffle c1,c2,m), (shuffle t1,t2,m), (shuffle f1,f2,m))
2422bool VectorCombine::foldShuffleOfSelects(Instruction &I) {
2423 ArrayRef<int> Mask;
2424 Value *C1, *T1, *F1, *C2, *T2, *F2;
2425 if (!match(&I, m_Shuffle(
2426 m_OneUse(m_Select(m_Value(C1), m_Value(T1), m_Value(F1))),
2427 m_OneUse(m_Select(m_Value(C2), m_Value(T2), m_Value(F2))),
2428 m_Mask(Mask))))
2429 return false;
2430
2431 auto *C1VecTy = dyn_cast<FixedVectorType>(C1->getType());
2432 auto *C2VecTy = dyn_cast<FixedVectorType>(C2->getType());
2433 if (!C1VecTy || !C2VecTy || C1VecTy != C2VecTy)
2434 return false;
2435
2436 auto *SI0FOp = dyn_cast<FPMathOperator>(I.getOperand(0));
2437 auto *SI1FOp = dyn_cast<FPMathOperator>(I.getOperand(1));
2438 // SelectInsts must have the same FMF.
2439 if (((SI0FOp == nullptr) != (SI1FOp == nullptr)) ||
2440 ((SI0FOp != nullptr) &&
2441 (SI0FOp->getFastMathFlags() != SI1FOp->getFastMathFlags())))
2442 return false;
2443
2444 auto *SrcVecTy = cast<FixedVectorType>(T1->getType());
2445 auto *DstVecTy = cast<FixedVectorType>(I.getType());
2446 auto SK = TargetTransformInfo::SK_PermuteTwoSrc;
2447 auto SelOp = Instruction::Select;
2448 InstructionCost OldCost = TTI.getCmpSelInstrCost(
2449 SelOp, SrcVecTy, C1VecTy, CmpInst::BAD_ICMP_PREDICATE, CostKind);
2450 OldCost += TTI.getCmpSelInstrCost(SelOp, SrcVecTy, C2VecTy,
2451 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2452 OldCost +=
2453 TTI.getShuffleCost(SK, DstVecTy, SrcVecTy, Mask, CostKind, 0, nullptr,
2454 {I.getOperand(0), I.getOperand(1)}, &I);
2455
2456 InstructionCost NewCost = TTI.getShuffleCost(
2457 SK, FixedVectorType::get(C1VecTy->getScalarType(), Mask.size()), C1VecTy,
2458 Mask, CostKind, 0, nullptr, {C1, C2});
2459 NewCost += TTI.getShuffleCost(SK, DstVecTy, SrcVecTy, Mask, CostKind, 0,
2460 nullptr, {T1, T2});
2461 NewCost += TTI.getShuffleCost(SK, DstVecTy, SrcVecTy, Mask, CostKind, 0,
2462 nullptr, {F1, F2});
2463 auto *C1C2ShuffledVecTy = cast<FixedVectorType>(
2464 toVectorTy(Type::getInt1Ty(I.getContext()), DstVecTy->getNumElements()));
2465 NewCost += TTI.getCmpSelInstrCost(SelOp, DstVecTy, C1C2ShuffledVecTy,
2466 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2467
2468 LLVM_DEBUG(dbgs() << "Found a shuffle feeding two selects: " << I
2469 << "\n OldCost: " << OldCost << " vs NewCost: " << NewCost
2470 << "\n");
2471 if (NewCost > OldCost)
2472 return false;
2473
2474 Value *ShuffleCmp = Builder.CreateShuffleVector(C1, C2, Mask);
2475 Value *ShuffleTrue = Builder.CreateShuffleVector(T1, T2, Mask);
2476 Value *ShuffleFalse = Builder.CreateShuffleVector(F1, F2, Mask);
2477 Value *NewSel;
2478 // We presuppose that the SelectInsts have the same FMF.
2479 if (SI0FOp)
2480 NewSel = Builder.CreateSelectFMF(ShuffleCmp, ShuffleTrue, ShuffleFalse,
2481 SI0FOp->getFastMathFlags());
2482 else
2483 NewSel = Builder.CreateSelect(ShuffleCmp, ShuffleTrue, ShuffleFalse);
2484
2485 Worklist.pushValue(ShuffleCmp);
2486 Worklist.pushValue(ShuffleTrue);
2487 Worklist.pushValue(ShuffleFalse);
2488 replaceValue(I, *NewSel);
2489 return true;
2490}
2491
2492/// Try to convert "shuffle (castop), (castop)" with a shared castop operand
2493/// into "castop (shuffle)".
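// Editor's illustration (sketch): for two one-use casts from the same source
// type,
//   %c0 = zext <4 x i16> %a to <4 x i32>
//   %c1 = zext <4 x i16> %b to <4 x i32>
//   %r  = shufflevector <4 x i32> %c0, <4 x i32> %c1,
//                       <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
// -->
//   %s  = shufflevector <4 x i16> %a, <4 x i16> %b,
//                       <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
//   %r  = zext <8 x i16> %s to <8 x i32>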
2494bool VectorCombine::foldShuffleOfCastops(Instruction &I) {
2495 Value *V0, *V1;
2496 ArrayRef<int> OldMask;
2497 if (!match(&I, m_Shuffle(m_Value(V0), m_Value(V1), m_Mask(OldMask))))
2498 return false;
2499
2500 // Check whether this is a binary shuffle.
2501 bool IsBinaryShuffle = !isa<UndefValue>(V1);
2502
2503 auto *C0 = dyn_cast<CastInst>(V0);
2504 auto *C1 = dyn_cast<CastInst>(V1);
2505 if (!C0 || (IsBinaryShuffle && !C1))
2506 return false;
2507
2508 Instruction::CastOps Opcode = C0->getOpcode();
2509
2510 // If this is allowed, foldShuffleOfCastops can get stuck in a loop
2511 // with foldBitcastOfShuffle. Reject in favor of foldBitcastOfShuffle.
2512 if (!IsBinaryShuffle && Opcode == Instruction::BitCast)
2513 return false;
2514
2515 if (IsBinaryShuffle) {
2516 if (C0->getSrcTy() != C1->getSrcTy())
2517 return false;
2518 // Handle shuffle(zext_nneg(x), sext(y)) -> sext(shuffle(x,y)) folds.
2519 if (Opcode != C1->getOpcode()) {
2520 if (match(C0, m_SExtLike(m_Value())) && match(C1, m_SExtLike(m_Value())))
2521 Opcode = Instruction::SExt;
2522 else
2523 return false;
2524 }
2525 }
2526
2527 auto *ShuffleDstTy = dyn_cast<FixedVectorType>(I.getType());
2528 auto *CastDstTy = dyn_cast<FixedVectorType>(C0->getDestTy());
2529 auto *CastSrcTy = dyn_cast<FixedVectorType>(C0->getSrcTy());
2530 if (!ShuffleDstTy || !CastDstTy || !CastSrcTy)
2531 return false;
2532
2533 unsigned NumSrcElts = CastSrcTy->getNumElements();
2534 unsigned NumDstElts = CastDstTy->getNumElements();
2535 assert((NumDstElts == NumSrcElts || Opcode == Instruction::BitCast) &&
2536 "Only bitcasts expected to alter src/dst element counts");
2537
2538 // Check for bitcasting of unscalable vector types.
2539 // e.g. <32 x i40> -> <40 x i32>
2540 if (NumDstElts != NumSrcElts && (NumSrcElts % NumDstElts) != 0 &&
2541 (NumDstElts % NumSrcElts) != 0)
2542 return false;
2543
2544 SmallVector<int, 16> NewMask;
2545 if (NumSrcElts >= NumDstElts) {
2546 // The bitcast is from wide to narrow/equal elements. The shuffle mask can
2547 // always be expanded to the equivalent form choosing narrower elements.
2548 assert(NumSrcElts % NumDstElts == 0 && "Unexpected shuffle mask");
2549 unsigned ScaleFactor = NumSrcElts / NumDstElts;
2550 narrowShuffleMaskElts(ScaleFactor, OldMask, NewMask);
2551 } else {
2552 // The bitcast is from narrow elements to wide elements. The shuffle mask
2553 // must choose consecutive elements to allow casting first.
2554 assert(NumDstElts % NumSrcElts == 0 && "Unexpected shuffle mask");
2555 unsigned ScaleFactor = NumDstElts / NumSrcElts;
2556 if (!widenShuffleMaskElts(ScaleFactor, OldMask, NewMask))
2557 return false;
2558 }
2559
2560 auto *NewShuffleDstTy =
2561 FixedVectorType::get(CastSrcTy->getScalarType(), NewMask.size());
2562
2563 // Try to replace a castop with a shuffle if the shuffle is not costly.
2564 InstructionCost CostC0 =
2565 TTI.getCastInstrCost(C0->getOpcode(), CastDstTy, CastSrcTy,
2566 TTI::CastContextHint::None, CostKind);
2567
2568 TargetTransformInfo::ShuffleKind ShuffleKind;
2569 if (IsBinaryShuffle)
2570 ShuffleKind = TargetTransformInfo::SK_PermuteTwoSrc;
2571 else
2572 ShuffleKind = TargetTransformInfo::SK_PermuteSingleSrc;
2573
2574 InstructionCost OldCost = CostC0;
2575 OldCost += TTI.getShuffleCost(ShuffleKind, ShuffleDstTy, CastDstTy, OldMask,
2576 CostKind, 0, nullptr, {}, &I);
2577
2578 InstructionCost NewCost = TTI.getShuffleCost(ShuffleKind, NewShuffleDstTy,
2579 CastSrcTy, NewMask, CostKind);
2580 NewCost += TTI.getCastInstrCost(Opcode, ShuffleDstTy, NewShuffleDstTy,
2581 TTI::CastContextHint::None, CostKind);
2582 if (!C0->hasOneUse())
2583 NewCost += CostC0;
2584 if (IsBinaryShuffle) {
2585 InstructionCost CostC1 =
2586 TTI.getCastInstrCost(C1->getOpcode(), CastDstTy, CastSrcTy,
2587 TTI::CastContextHint::None, CostKind);
2588 OldCost += CostC1;
2589 if (!C1->hasOneUse())
2590 NewCost += CostC1;
2591 }
2592
2593 LLVM_DEBUG(dbgs() << "Found a shuffle feeding two casts: " << I
2594 << "\n OldCost: " << OldCost << " vs NewCost: " << NewCost
2595 << "\n");
2596 if (NewCost > OldCost)
2597 return false;
2598
2599 Value *Shuf;
2600 if (IsBinaryShuffle)
2601 Shuf = Builder.CreateShuffleVector(C0->getOperand(0), C1->getOperand(0),
2602 NewMask);
2603 else
2604 Shuf = Builder.CreateShuffleVector(C0->getOperand(0), NewMask);
2605
2606 Value *Cast = Builder.CreateCast(Opcode, Shuf, ShuffleDstTy);
2607
2608 // Intersect flags from the old casts.
2609 if (auto *NewInst = dyn_cast<Instruction>(Cast)) {
2610 NewInst->copyIRFlags(C0);
2611 if (IsBinaryShuffle)
2612 NewInst->andIRFlags(C1);
2613 }
2614
2615 Worklist.pushValue(Shuf);
2616 replaceValue(I, *Cast);
2617 return true;
2618}
2619
2620/// Try to convert any of:
2621/// "shuffle (shuffle x, y), (shuffle y, x)"
2622/// "shuffle (shuffle x, undef), (shuffle y, undef)"
2623/// "shuffle (shuffle x, undef), y"
2624/// "shuffle x, (shuffle y, undef)"
2625/// into "shuffle x, y".
2626bool VectorCombine::foldShuffleOfShuffles(Instruction &I) {
2627 ArrayRef<int> OuterMask;
2628 Value *OuterV0, *OuterV1;
2629 if (!match(&I,
2630 m_Shuffle(m_Value(OuterV0), m_Value(OuterV1), m_Mask(OuterMask))))
2631 return false;
2632
2633 ArrayRef<int> InnerMask0, InnerMask1;
2634 Value *X0, *X1, *Y0, *Y1;
2635 bool Match0 =
2636 match(OuterV0, m_Shuffle(m_Value(X0), m_Value(Y0), m_Mask(InnerMask0)));
2637 bool Match1 =
2638 match(OuterV1, m_Shuffle(m_Value(X1), m_Value(Y1), m_Mask(InnerMask1)));
2639 if (!Match0 && !Match1)
2640 return false;
2641
2642 // If the outer shuffle is a permute, then create a fake inner all-poison
2643 // shuffle. This is easier than accounting for length-changing shuffles below.
2644 SmallVector<int, 16> PoisonMask1;
2645 if (!Match1 && isa<PoisonValue>(OuterV1)) {
2646 X1 = X0;
2647 Y1 = Y0;
2648 PoisonMask1.append(InnerMask0.size(), PoisonMaskElem);
2649 InnerMask1 = PoisonMask1;
2650 Match1 = true; // fake match
2651 }
2652
2653 X0 = Match0 ? X0 : OuterV0;
2654 Y0 = Match0 ? Y0 : OuterV0;
2655 X1 = Match1 ? X1 : OuterV1;
2656 Y1 = Match1 ? Y1 : OuterV1;
2657 auto *ShuffleDstTy = dyn_cast<FixedVectorType>(I.getType());
2658 auto *ShuffleSrcTy = dyn_cast<FixedVectorType>(X0->getType());
2659 auto *ShuffleImmTy = dyn_cast<FixedVectorType>(OuterV0->getType());
2660 if (!ShuffleDstTy || !ShuffleSrcTy || !ShuffleImmTy ||
2661 X0->getType() != X1->getType())
2662 return false;
2663
2664 unsigned NumSrcElts = ShuffleSrcTy->getNumElements();
2665 unsigned NumImmElts = ShuffleImmTy->getNumElements();
2666
2667 // Attempt to merge shuffles, matching up to 2 source operands.
2668 // Replace index to a poison arg with PoisonMaskElem.
2669 // Bail if either inner mask references an undef arg.
2670 SmallVector<int, 16> NewMask(OuterMask);
2671 Value *NewX = nullptr, *NewY = nullptr;
2672 for (int &M : NewMask) {
2673 Value *Src = nullptr;
2674 if (0 <= M && M < (int)NumImmElts) {
2675 Src = OuterV0;
2676 if (Match0) {
2677 M = InnerMask0[M];
2678 Src = M >= (int)NumSrcElts ? Y0 : X0;
2679 M = M >= (int)NumSrcElts ? (M - NumSrcElts) : M;
2680 }
2681 } else if (M >= (int)NumImmElts) {
2682 Src = OuterV1;
2683 M -= NumImmElts;
2684 if (Match1) {
2685 M = InnerMask1[M];
2686 Src = M >= (int)NumSrcElts ? Y1 : X1;
2687 M = M >= (int)NumSrcElts ? (M - NumSrcElts) : M;
2688 }
2689 }
2690 if (Src && M != PoisonMaskElem) {
2691 assert(0 <= M && M < (int)NumSrcElts && "Unexpected shuffle mask index");
2692 if (isa<UndefValue>(Src)) {
2693 // We've referenced an undef element - if it's poison, update the shuffle
2694 // mask, else bail.
2695 if (!isa<PoisonValue>(Src))
2696 return false;
2697 M = PoisonMaskElem;
2698 continue;
2699 }
2700 if (!NewX || NewX == Src) {
2701 NewX = Src;
2702 continue;
2703 }
2704 if (!NewY || NewY == Src) {
2705 M += NumSrcElts;
2706 NewY = Src;
2707 continue;
2708 }
2709 return false;
2710 }
2711 }
2712
2713 if (!NewX)
2714 return PoisonValue::get(ShuffleDstTy);
2715 if (!NewY)
2716 NewY = PoisonValue::get(ShuffleSrcTy);
2717
2718 // Have we folded to an Identity shuffle?
2719 if (ShuffleVectorInst::isIdentityMask(NewMask, NumSrcElts)) {
2720 replaceValue(I, *NewX);
2721 return true;
2722 }
2723
2724 // Try to merge the shuffles if the new shuffle is not costly.
2725 InstructionCost InnerCost0 = 0;
2726 if (Match0)
2727 InnerCost0 = TTI.getInstructionCost(cast<User>(OuterV0), CostKind);
2728
2729 InstructionCost InnerCost1 = 0;
2730 if (Match1)
2731 InnerCost1 = TTI.getInstructionCost(cast<User>(OuterV1), CostKind);
2732
2733 InstructionCost OuterCost = TTI.getInstructionCost(&I, CostKind);
2734
2735 InstructionCost OldCost = InnerCost0 + InnerCost1 + OuterCost;
2736
2737 bool IsUnary = all_of(NewMask, [&](int M) { return M < (int)NumSrcElts; });
2738 TargetTransformInfo::ShuffleKind SK =
2739 IsUnary ? TargetTransformInfo::SK_PermuteSingleSrc
2740 : TargetTransformInfo::SK_PermuteTwoSrc;
2741 InstructionCost NewCost =
2742 TTI.getShuffleCost(SK, ShuffleDstTy, ShuffleSrcTy, NewMask, CostKind, 0,
2743 nullptr, {NewX, NewY});
2744 if (!OuterV0->hasOneUse())
2745 NewCost += InnerCost0;
2746 if (!OuterV1->hasOneUse())
2747 NewCost += InnerCost1;
2748
2749 LLVM_DEBUG(dbgs() << "Found a shuffle feeding two shuffles: " << I
2750 << "\n OldCost: " << OldCost << " vs NewCost: " << NewCost
2751 << "\n");
2752 if (NewCost > OldCost)
2753 return false;
2754
2755 Value *Shuf = Builder.CreateShuffleVector(NewX, NewY, NewMask);
2756 replaceValue(I, *Shuf);
2757 return true;
2758}
2759
2760/// Try to convert
2761/// "shuffle (intrinsic), (intrinsic)" into "intrinsic (shuffle), (shuffle)".
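// Editor's illustration (sketch, intrinsic chosen only for the example):
//   %i0 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %a, <4 x i32> %b)
//   %i1 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %c, <4 x i32> %d)
//   %r  = shufflevector <4 x i32> %i0, <4 x i32> %i1,
//                       <4 x i32> <i32 0, i32 4, i32 1, i32 5>
// -->
//   %s0 = shufflevector <4 x i32> %a, <4 x i32> %c,
//                       <4 x i32> <i32 0, i32 4, i32 1, i32 5>
//   %s1 = shufflevector <4 x i32> %b, <4 x i32> %d,
//                       <4 x i32> <i32 0, i32 4, i32 1, i32 5>
//   %r  = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %s0, <4 x i32> %s1)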
2762bool VectorCombine::foldShuffleOfIntrinsics(Instruction &I) {
2763 Value *V0, *V1;
2764 ArrayRef<int> OldMask;
2765 if (!match(&I, m_Shuffle(m_OneUse(m_Value(V0)), m_OneUse(m_Value(V1)),
2766 m_Mask(OldMask))))
2767 return false;
2768
2769 auto *II0 = dyn_cast<IntrinsicInst>(V0);
2770 auto *II1 = dyn_cast<IntrinsicInst>(V1);
2771 if (!II0 || !II1)
2772 return false;
2773
2774 Intrinsic::ID IID = II0->getIntrinsicID();
2775 if (IID != II1->getIntrinsicID())
2776 return false;
2777
2778 auto *ShuffleDstTy = dyn_cast<FixedVectorType>(I.getType());
2779 auto *II0Ty = dyn_cast<FixedVectorType>(II0->getType());
2780 if (!ShuffleDstTy || !II0Ty)
2781 return false;
2782
2783 if (!isTriviallyVectorizable(IID))
2784 return false;
2785
2786 for (unsigned I = 0, E = II0->arg_size(); I != E; ++I)
2787 if (isVectorIntrinsicWithScalarOpAtArg(IID, I, &TTI) &&
2788 II0->getArgOperand(I) != II1->getArgOperand(I))
2789 return false;
2790
2791 InstructionCost OldCost =
2792 TTI.getIntrinsicInstrCost(IntrinsicCostAttributes(IID, *II0), CostKind) +
2793 TTI.getIntrinsicInstrCost(IntrinsicCostAttributes(IID, *II1), CostKind) +
2794 TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, ShuffleDstTy,
2795 II0Ty, OldMask, CostKind, 0, nullptr, {II0, II1}, &I);
2796
2797 SmallVector<Type *> NewArgsTy;
2798 InstructionCost NewCost = 0;
2799 for (unsigned I = 0, E = II0->arg_size(); I != E; ++I) {
2800 if (isVectorIntrinsicWithScalarOpAtArg(IID, I, &TTI)) {
2801 NewArgsTy.push_back(II0->getArgOperand(I)->getType());
2802 } else {
2803 auto *VecTy = cast<FixedVectorType>(II0->getArgOperand(I)->getType());
2804 auto *ArgTy = FixedVectorType::get(VecTy->getElementType(),
2805 ShuffleDstTy->getNumElements());
2806 NewArgsTy.push_back(ArgTy);
2807 NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc,
2808 ArgTy, VecTy, OldMask, CostKind);
2809 }
2810 }
2811 IntrinsicCostAttributes NewAttr(IID, ShuffleDstTy, NewArgsTy);
2812 NewCost += TTI.getIntrinsicInstrCost(NewAttr, CostKind);
2813
2814 LLVM_DEBUG(dbgs() << "Found a shuffle feeding two intrinsics: " << I
2815 << "\n OldCost: " << OldCost << " vs NewCost: " << NewCost
2816 << "\n");
2817
2818 if (NewCost > OldCost)
2819 return false;
2820
2821 SmallVector<Value *> NewArgs;
2822 for (unsigned I = 0, E = II0->arg_size(); I != E; ++I)
2823 if (isVectorIntrinsicWithScalarOpAtArg(IID, I, &TTI)) {
2824 NewArgs.push_back(II0->getArgOperand(I));
2825 } else {
2826 Value *Shuf = Builder.CreateShuffleVector(II0->getArgOperand(I),
2827 II1->getArgOperand(I), OldMask);
2828 NewArgs.push_back(Shuf);
2829 Worklist.pushValue(Shuf);
2830 }
2831 Value *NewIntrinsic = Builder.CreateIntrinsic(ShuffleDstTy, IID, NewArgs);
2832
2833 // Intersect flags from the old intrinsics.
2834 if (auto *NewInst = dyn_cast<Instruction>(NewIntrinsic)) {
2835 NewInst->copyIRFlags(II0);
2836 NewInst->andIRFlags(II1);
2837 }
2838
2839 replaceValue(I, *NewIntrinsic);
2840 return true;
2841}
2842
2843using InstLane = std::pair<Use *, int>;
2844
2845static InstLane lookThroughShuffles(Use *U, int Lane) {
2846 while (auto *SV = dyn_cast<ShuffleVectorInst>(U->get())) {
2847 unsigned NumElts =
2848 cast<FixedVectorType>(SV->getOperand(0)->getType())->getNumElements();
2849 int M = SV->getMaskValue(Lane);
2850 if (M < 0)
2851 return {nullptr, PoisonMaskElem};
2852 if (static_cast<unsigned>(M) < NumElts) {
2853 U = &SV->getOperandUse(0);
2854 Lane = M;
2855 } else {
2856 U = &SV->getOperandUse(1);
2857 Lane = M - NumElts;
2858 }
2859 }
2860 return InstLane{U, Lane};
2861}
2862
2863static SmallVector<InstLane>
2864generateInstLaneVectorFromOperand(ArrayRef<InstLane> Item, int Op) {
2865 SmallVector<InstLane> NItem;
2866 for (InstLane IL : Item) {
2867 auto [U, Lane] = IL;
2868 InstLane OpLane =
2869 U ? lookThroughShuffles(&cast<Instruction>(U->get())->getOperandUse(Op),
2870 Lane)
2871 : InstLane{nullptr, PoisonMaskElem};
2872 NItem.emplace_back(OpLane);
2873 }
2874 return NItem;
2875}
2876
2877/// Detect concat of multiple values into a vector
2878static bool isFreeConcat(ArrayRef<InstLane> Item, TTI::TargetCostKind CostKind,
2879 const TargetTransformInfo &TTI) {
2880 auto *Ty = cast<FixedVectorType>(Item.front().first->get()->getType());
2881 unsigned NumElts = Ty->getNumElements();
2882 if (Item.size() == NumElts || NumElts == 1 || Item.size() % NumElts != 0)
2883 return false;
2884
2885 // Check that the concat is free, usually meaning that the type will be split
2886 // during legalization.
2887 SmallVector<int, 16> ConcatMask(NumElts * 2);
2888 std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
2889 if (TTI.getShuffleCost(TTI::SK_PermuteTwoSrc,
2890 FixedVectorType::get(Ty->getScalarType(), NumElts * 2),
2891 Ty, ConcatMask, CostKind) != 0)
2892 return false;
2893
2894 unsigned NumSlices = Item.size() / NumElts;
2895 // Currently we generate a tree of shuffles for the concats, which limits us
2896 // to a power2.
2897 if (!isPowerOf2_32(NumSlices))
2898 return false;
2899 for (unsigned Slice = 0; Slice < NumSlices; ++Slice) {
2900 Use *SliceV = Item[Slice * NumElts].first;
2901 if (!SliceV || SliceV->get()->getType() != Ty)
2902 return false;
2903 for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
2904 auto [V, Lane] = Item[Slice * NumElts + Elt];
2905 if (Lane != static_cast<int>(Elt) || SliceV->get() != V->get())
2906 return false;
2907 }
2908 }
2909 return true;
2910}
2911
2912static Value *generateNewInstTree(ArrayRef<InstLane> Item, FixedVectorType *Ty,
2913 const SmallPtrSet<Use *, 4> &IdentityLeafs,
2914 const SmallPtrSet<Use *, 4> &SplatLeafs,
2915 const SmallPtrSet<Use *, 4> &ConcatLeafs,
2916 IRBuilderBase &Builder,
2917 const TargetTransformInfo *TTI) {
2918 auto [FrontU, FrontLane] = Item.front();
2919
2920 if (IdentityLeafs.contains(FrontU)) {
2921 return FrontU->get();
2922 }
2923 if (SplatLeafs.contains(FrontU)) {
2924 SmallVector<int, 16> Mask(Ty->getNumElements(), FrontLane);
2925 return Builder.CreateShuffleVector(FrontU->get(), Mask);
2926 }
2927 if (ConcatLeafs.contains(FrontU)) {
2928 unsigned NumElts =
2929 cast<FixedVectorType>(FrontU->get()->getType())->getNumElements();
2930 SmallVector<Value *> Values(Item.size() / NumElts, nullptr);
2931 for (unsigned S = 0; S < Values.size(); ++S)
2932 Values[S] = Item[S * NumElts].first->get();
2933
2934 while (Values.size() > 1) {
2935 NumElts *= 2;
2936 SmallVector<int, 16> Mask(NumElts, 0);
2937 std::iota(Mask.begin(), Mask.end(), 0);
2938 SmallVector<Value *> NewValues(Values.size() / 2, nullptr);
2939 for (unsigned S = 0; S < NewValues.size(); ++S)
2940 NewValues[S] =
2941 Builder.CreateShuffleVector(Values[S * 2], Values[S * 2 + 1], Mask);
2942 Values = NewValues;
2943 }
2944 return Values[0];
2945 }
2946
2947 auto *I = cast<Instruction>(FrontU->get());
2948 auto *II = dyn_cast<IntrinsicInst>(I);
2949 unsigned NumOps = I->getNumOperands() - (II ? 1 : 0);
2950 SmallVector<Value *> Ops(NumOps);
2951 for (unsigned Idx = 0; Idx < NumOps; Idx++) {
2952 if (II &&
2953 isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Idx, TTI)) {
2954 Ops[Idx] = II->getOperand(Idx);
2955 continue;
2956 }
2957 Ops[Idx] = generateNewInstTree(generateInstLaneVectorFromOperand(Item, Idx),
2958 Ty, IdentityLeafs, SplatLeafs, ConcatLeafs,
2959 Builder, TTI);
2960 }
2961
2962 SmallVector<Value *, 8> ValueList;
2963 for (const auto &Lane : Item)
2964 if (Lane.first)
2965 ValueList.push_back(Lane.first->get());
2966
2967 Type *DstTy =
2968 FixedVectorType::get(I->getType()->getScalarType(), Ty->getNumElements());
2969 if (auto *BI = dyn_cast<BinaryOperator>(I)) {
2970 auto *Value = Builder.CreateBinOp((Instruction::BinaryOps)BI->getOpcode(),
2971 Ops[0], Ops[1]);
2972 propagateIRFlags(Value, ValueList);
2973 return Value;
2974 }
2975 if (auto *CI = dyn_cast<CmpInst>(I)) {
2976 auto *Value = Builder.CreateCmp(CI->getPredicate(), Ops[0], Ops[1]);
2977 propagateIRFlags(Value, ValueList);
2978 return Value;
2979 }
2980 if (auto *SI = dyn_cast<SelectInst>(I)) {
2981 auto *Value = Builder.CreateSelect(Ops[0], Ops[1], Ops[2], "", SI);
2982 propagateIRFlags(Value, ValueList);
2983 return Value;
2984 }
2985 if (auto *CI = dyn_cast<CastInst>(I)) {
2986 auto *Value = Builder.CreateCast(CI->getOpcode(), Ops[0], DstTy);
2987 propagateIRFlags(Value, ValueList);
2988 return Value;
2989 }
2990 if (II) {
2991 auto *Value = Builder.CreateIntrinsic(DstTy, II->getIntrinsicID(), Ops);
2992 propagateIRFlags(Value, ValueList);
2993 return Value;
2994 }
2995 assert(isa<UnaryInstruction>(I) && "Unexpected instruction type in Generate");
2996 auto *Value =
2997 Builder.CreateUnOp((Instruction::UnaryOps)I->getOpcode(), Ops[0]);
2998 propagateIRFlags(Value, ValueList);
2999 return Value;
3000}
3001
3002// Starting from a shuffle, look up through operands tracking the shuffled index
3003// of each lane. If we can simplify away the shuffles to identities then
3004// do so.
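// Editor's illustration (sketch): if every lane of the final shuffle traces
// back to the matching lane of the same source, the shuffles are superfluous:
//   %r0 = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//   %a  = add <4 x i32> %r0, <i32 1, i32 1, i32 1, i32 1>
//   %r  = shufflevector <4 x i32> %a, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// the two reversals cancel, so this is rebuilt as
//   %r  = add <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>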
3005bool VectorCombine::foldShuffleToIdentity(Instruction &I) {
3006 auto *Ty = dyn_cast<FixedVectorType>(I.getType());
3007 if (!Ty || I.use_empty())
3008 return false;
3009
3010 SmallVector<InstLane> Start(Ty->getNumElements());
3011 for (unsigned M = 0, E = Ty->getNumElements(); M < E; ++M)
3012 Start[M] = lookThroughShuffles(&*I.use_begin(), M);
3013
3014 SmallVector<SmallVector<InstLane>> Worklist;
3015 Worklist.push_back(Start);
3016 SmallPtrSet<Use *, 4> IdentityLeafs, SplatLeafs, ConcatLeafs;
3017 unsigned NumVisited = 0;
3018
3019 while (!Worklist.empty()) {
3020 if (++NumVisited > MaxInstrsToScan)
3021 return false;
3022
3023 SmallVector<InstLane> Item = Worklist.pop_back_val();
3024 auto [FrontU, FrontLane] = Item.front();
3025
3026 // If we found an undef first lane then bail out to keep things simple.
3027 if (!FrontU)
3028 return false;
3029
3030 // Helper to peek through bitcasts to the same value.
3031 auto IsEquiv = [&](Value *X, Value *Y) {
3032 return X->getType() == Y->getType() &&
3034 };
3035
3036 // Look for an identity value.
3037 if (FrontLane == 0 &&
3038 cast<FixedVectorType>(FrontU->get()->getType())->getNumElements() ==
3039 Ty->getNumElements() &&
3040 all_of(drop_begin(enumerate(Item)), [IsEquiv, Item](const auto &E) {
3041 Value *FrontV = Item.front().first->get();
3042 return !E.value().first || (IsEquiv(E.value().first->get(), FrontV) &&
3043 E.value().second == (int)E.index());
3044 })) {
3045 IdentityLeafs.insert(FrontU);
3046 continue;
3047 }
3048 // Look for constants, for the moment only supporting constant splats.
3049 if (auto *C = dyn_cast<Constant>(FrontU);
3050 C && C->getSplatValue() &&
3051 all_of(drop_begin(Item), [Item](InstLane &IL) {
3052 Value *FrontV = Item.front().first->get();
3053 Use *U = IL.first;
3054 return !U || (isa<Constant>(U->get()) &&
3055 cast<Constant>(U->get())->getSplatValue() ==
3056 cast<Constant>(FrontV)->getSplatValue());
3057 })) {
3058 SplatLeafs.insert(FrontU);
3059 continue;
3060 }
3061 // Look for a splat value.
3062 if (all_of(drop_begin(Item), [Item](InstLane &IL) {
3063 auto [FrontU, FrontLane] = Item.front();
3064 auto [U, Lane] = IL;
3065 return !U || (U->get() == FrontU->get() && Lane == FrontLane);
3066 })) {
3067 SplatLeafs.insert(FrontU);
3068 continue;
3069 }
3070
3071 // We need each element to be the same type of value, and check that each
3072 // element has a single use.
3073 auto CheckLaneIsEquivalentToFirst = [Item](InstLane IL) {
3074 Value *FrontV = Item.front().first->get();
3075 if (!IL.first)
3076 return true;
3077 Value *V = IL.first->get();
3078 if (auto *I = dyn_cast<Instruction>(V); I && !I->hasOneUser())
3079 return false;
3080 if (V->getValueID() != FrontV->getValueID())
3081 return false;
3082 if (auto *CI = dyn_cast<CmpInst>(V))
3083 if (CI->getPredicate() != cast<CmpInst>(FrontV)->getPredicate())
3084 return false;
3085 if (auto *CI = dyn_cast<CastInst>(V))
3086 if (CI->getSrcTy()->getScalarType() !=
3087 cast<CastInst>(FrontV)->getSrcTy()->getScalarType())
3088 return false;
3089 if (auto *SI = dyn_cast<SelectInst>(V))
3090 if (!isa<VectorType>(SI->getOperand(0)->getType()) ||
3091 SI->getOperand(0)->getType() !=
3092 cast<SelectInst>(FrontV)->getOperand(0)->getType())
3093 return false;
3094 if (isa<CallInst>(V) && !isa<IntrinsicInst>(V))
3095 return false;
3096 auto *II = dyn_cast<IntrinsicInst>(V);
3097 return !II || (isa<IntrinsicInst>(FrontV) &&
3098 II->getIntrinsicID() ==
3099 cast<IntrinsicInst>(FrontV)->getIntrinsicID() &&
3100 !II->hasOperandBundles());
3101 };
3102 if (all_of(drop_begin(Item), CheckLaneIsEquivalentToFirst)) {
3103 // Check the operator is one that we support.
3104 if (isa<BinaryOperator, CmpInst>(FrontU)) {
3105 // We exclude div/rem in case they hit UB from poison lanes.
3106 if (auto *BO = dyn_cast<BinaryOperator>(FrontU);
3107 BO && BO->isIntDivRem())
3108 return false;
3109 Worklist.push_back(generateInstLaneVectorFromOperand(Item, 0));
3110 Worklist.push_back(generateInstLaneVectorFromOperand(Item, 1));
3111 continue;
3112 } else if (isa<UnaryOperator, TruncInst, ZExtInst, SExtInst, FPToSIInst,
3113 FPToUIInst, SIToFPInst, UIToFPInst>(FrontU)) {
3114 Worklist.push_back(generateInstLaneVectorFromOperand(Item, 0));
3115 continue;
3116 } else if (auto *BitCast = dyn_cast<BitCastInst>(FrontU)) {
3117 // TODO: Handle vector widening/narrowing bitcasts.
3118 auto *DstTy = dyn_cast<FixedVectorType>(BitCast->getDestTy());
3119 auto *SrcTy = dyn_cast<FixedVectorType>(BitCast->getSrcTy());
3120 if (DstTy && SrcTy &&
3121 SrcTy->getNumElements() == DstTy->getNumElements()) {
3122 Worklist.push_back(generateInstLaneVectorFromOperand(Item, 0));
3123 continue;
3124 }
3125 } else if (isa<SelectInst>(FrontU)) {
3126 Worklist.push_back(generateInstLaneVectorFromOperand(Item, 0));
3127 Worklist.push_back(generateInstLaneVectorFromOperand(Item, 1));
3128 Worklist.push_back(generateInstLaneVectorFromOperand(Item, 2));
3129 continue;
3130 } else if (auto *II = dyn_cast<IntrinsicInst>(FrontU);
3131 II && isTriviallyVectorizable(II->getIntrinsicID()) &&
3132 !II->hasOperandBundles()) {
3133 for (unsigned Op = 0, E = II->getNumOperands() - 1; Op < E; Op++) {
3134 if (isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Op,
3135 &TTI)) {
3136 if (!all_of(drop_begin(Item), [Item, Op](InstLane &IL) {
3137 Value *FrontV = Item.front().first->get();
3138 Use *U = IL.first;
3139 return !U || (cast<Instruction>(U->get())->getOperand(Op) ==
3140 cast<Instruction>(FrontV)->getOperand(Op));
3141 }))
3142 return false;
3143 continue;
3144 }
3145 Worklist.push_back(generateInstLaneVectorFromOperand(Item, Op));
3146 }
3147 continue;
3148 }
3149 }
3150
3151 if (isFreeConcat(Item, CostKind, TTI)) {
3152 ConcatLeafs.insert(FrontU);
3153 continue;
3154 }
3155
3156 return false;
3157 }
3158
3159 if (NumVisited <= 1)
3160 return false;
3161
3162 LLVM_DEBUG(dbgs() << "Found a superfluous identity shuffle: " << I << "\n");
3163
3164 // If we got this far, we know the shuffles are superfluous and can be
3165 // removed. Scan through again and generate the new tree of instructions.
3166 Builder.SetInsertPoint(&I);
3167 Value *V = generateNewInstTree(Start, Ty, IdentityLeafs, SplatLeafs,
3168 ConcatLeafs, Builder, &TTI);
3169 replaceValue(I, *V);
3170 return true;
3171}
3172
3173/// Given a commutative reduction, the order of the input lanes does not alter
3174/// the results. We can use this to remove certain shuffles feeding the
3175/// reduction, removing the need to shuffle at all.
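/// As a hypothetical example (not taken from the tests), with a commutative
/// integer add reduction:
///   %s = shufflevector <4 x i32> %a, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
///   %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
/// the reversing shuffle cannot change the reduced value, so its mask can be
/// replaced by the sorted (identity) mask and later folded away entirely.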
3176bool VectorCombine::foldShuffleFromReductions(Instruction &I) {
3177 auto *II = dyn_cast<IntrinsicInst>(&I);
3178 if (!II)
3179 return false;
3180 switch (II->getIntrinsicID()) {
3181 case Intrinsic::vector_reduce_add:
3182 case Intrinsic::vector_reduce_mul:
3183 case Intrinsic::vector_reduce_and:
3184 case Intrinsic::vector_reduce_or:
3185 case Intrinsic::vector_reduce_xor:
3186 case Intrinsic::vector_reduce_smin:
3187 case Intrinsic::vector_reduce_smax:
3188 case Intrinsic::vector_reduce_umin:
3189 case Intrinsic::vector_reduce_umax:
3190 break;
3191 default:
3192 return false;
3193 }
3194
3195 // Find all the inputs when looking through operations that do not alter the
3196 // lane order (binops, for example). Currently we look for a single shuffle,
3197 // and can ignore splat values.
3198 std::queue<Value *> Worklist;
3199 SmallPtrSet<Value *, 4> Visited;
3200 ShuffleVectorInst *Shuffle = nullptr;
3201 if (auto *Op = dyn_cast<Instruction>(I.getOperand(0)))
3202 Worklist.push(Op);
3203
3204 while (!Worklist.empty()) {
3205 Value *CV = Worklist.front();
3206 Worklist.pop();
3207 if (Visited.contains(CV))
3208 continue;
3209
3210 // Splats don't change the order, so can be safely ignored.
3211 if (isSplatValue(CV))
3212 continue;
3213
3214 Visited.insert(CV);
3215
3216 if (auto *CI = dyn_cast<Instruction>(CV)) {
3217 if (CI->isBinaryOp()) {
3218 for (auto *Op : CI->operand_values())
3219 Worklist.push(Op);
3220 continue;
3221 } else if (auto *SV = dyn_cast<ShuffleVectorInst>(CI)) {
3222 if (Shuffle && Shuffle != SV)
3223 return false;
3224 Shuffle = SV;
3225 continue;
3226 }
3227 }
3228
3229 // Anything else is currently an unknown node.
3230 return false;
3231 }
3232
3233 if (!Shuffle)
3234 return false;
3235
3236 // Check all uses of the binary ops and shuffles are also included in the
3237 // lane-invariant operations (Visited should be the list of lanewise
3238 // instructions, including the shuffle that we found).
3239 for (auto *V : Visited)
3240 for (auto *U : V->users())
3241 if (!Visited.contains(U) && U != &I)
3242 return false;
3243
3244 FixedVectorType *VecType =
3245 dyn_cast<FixedVectorType>(II->getOperand(0)->getType());
3246 if (!VecType)
3247 return false;
3248 FixedVectorType *ShuffleInputType =
3249 dyn_cast<FixedVectorType>(Shuffle->getOperand(0)->getType());
3250 if (!ShuffleInputType)
3251 return false;
3252 unsigned NumInputElts = ShuffleInputType->getNumElements();
3253
3254 // Find the mask from sorting the lanes into order. This is most likely to
3255 // become an identity or concat mask. Undef elements are pushed to the end.
3256 SmallVector<int> ConcatMask;
3257 Shuffle->getShuffleMask(ConcatMask);
3258 sort(ConcatMask, [](int X, int Y) { return (unsigned)X < (unsigned)Y; });
3259 bool UsesSecondVec =
3260 any_of(ConcatMask, [&](int M) { return M >= (int)NumInputElts; });
3261
3262 InstructionCost OldCost = TTI.getShuffleCost(
3263 UsesSecondVec ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc, VecType,
3264 ShuffleInputType, Shuffle->getShuffleMask(), CostKind);
3265 InstructionCost NewCost = TTI.getShuffleCost(
3266 UsesSecondVec ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc, VecType,
3267 ShuffleInputType, ConcatMask, CostKind);
3268
3269 LLVM_DEBUG(dbgs() << "Found a reduction feeding from a shuffle: " << *Shuffle
3270 << "\n");
3271 LLVM_DEBUG(dbgs() << " OldCost: " << OldCost << " vs NewCost: " << NewCost
3272 << "\n");
3273 bool MadeChanges = false;
3274 if (NewCost < OldCost) {
3275 Builder.SetInsertPoint(Shuffle);
3276 Value *NewShuffle = Builder.CreateShuffleVector(
3277 Shuffle->getOperand(0), Shuffle->getOperand(1), ConcatMask);
3278 LLVM_DEBUG(dbgs() << "Created new shuffle: " << *NewShuffle << "\n");
3279 replaceValue(*Shuffle, *NewShuffle);
3280 return true;
3281 }
3282
3283 // See if we can re-use foldSelectShuffle, getting it to reduce the size of
3284 // the shuffle into a nicer order, as it can ignore the order of the shuffles.
3285 MadeChanges |= foldSelectShuffle(*Shuffle, true);
3286 return MadeChanges;
3287}
3288
3289/// For a given chain of patterns of the following form:
3290///
3291/// ```
3292/// %1 = shufflevector <n x ty1> %0, <n x ty1> poison <n x ty2> mask
3293///
3294/// %2 = tail call <n x ty1> llvm.<umin/umax/smin/smax>(<n x ty1> %0, <n x
3295/// ty1> %1)
3296/// OR
3297/// %2 = add/mul/or/and/xor <n x ty1> %0, %1
3298///
3299/// %3 = shufflevector <n x ty1> %2, <n x ty1> poison <n x ty2> mask
3300/// ...
3301/// ...
3302/// %(i - 1) = tail call <n x ty1> llvm.<umin/umax/smin/smax>(<n x ty1> %(i -
3303/// 3), <n x ty1> %(i - 2)
3304/// OR
3305/// %(i - 1) = add/mul/or/and/xor <n x ty1> %(i - 3), %(i - 2)
3306///
3307/// %(i) = extractelement <n x ty1> %(i - 1), 0
3308/// ```
3309///
3310/// Where:
3311/// `mask` follows a partition pattern:
3312///
3313/// Ex:
3314/// [n = 8, p = poison]
3315///
3316/// 4 5 6 7 | p p p p
3317/// 2 3 | p p p p p p
3318/// 1 | p p p p p p p
3319///
3320/// For powers of 2, there's a consistent pattern, but for other cases
3321 /// the parity of the current half value at each step decides the
3322 /// next partition half (see `ExpectedParityMask` below for how this is
3323 /// generalised).
3324///
3325/// Ex:
3326/// [n = 6]
3327///
3328/// 3 4 5 | p p p
3329/// 1 2 | p p p p
3330/// 1 | p p p p p
3331bool VectorCombine::foldShuffleChainsToReduce(Instruction &I) {
3332 // Going bottom-up for the pattern.
3333 std::queue<Value *> InstWorklist;
3334 InstructionCost OrigCost = 0;
3335
3336 // Common instruction operation after each shuffle op.
3337 std::optional<unsigned int> CommonCallOp = std::nullopt;
3338 std::optional<Instruction::BinaryOps> CommonBinOp = std::nullopt;
3339
3340 bool IsFirstCallOrBinInst = true;
3341 bool ShouldBeCallOrBinInst = true;
3342
3343 // This stores the last used instructions for shuffle/common op.
3344 //
3345 // PrevVecV[0] / PrevVecV[1] store the last two simultaneous
3346 // instructions from either shuffle/common op.
3347 SmallVector<Value *, 2> PrevVecV(2, nullptr);
3348
3349 Value *VecOpEE;
3350 if (!match(&I, m_ExtractElt(m_Value(VecOpEE), m_Zero())))
3351 return false;
3352
3353 auto *FVT = dyn_cast<FixedVectorType>(VecOpEE->getType());
3354 if (!FVT)
3355 return false;
3356
3357 int64_t VecSize = FVT->getNumElements();
3358 if (VecSize < 2)
3359 return false;
3360
3361 // Number of levels would be ~log2(n), considering we always partition
3362 // by half for this fold pattern.
3363 unsigned int NumLevels = Log2_64_Ceil(VecSize), VisitedCnt = 0;
3364 int64_t ShuffleMaskHalf = 1, ExpectedParityMask = 0;
3365
3366 // This is how we generalise for all element sizes.
3367 // At each step, if vector size is odd, we need non-poison
3368 // values to cover the dominant half so we don't miss out on any element.
3369 //
3370 // This mask will help us retrieve this as we go from bottom to top:
3371 //
3372 // Mask Set -> N = N * 2 - 1
3373 // Mask Unset -> N = N * 2
3374 for (int Cur = VecSize, Mask = NumLevels - 1; Cur > 1;
3375 Cur = (Cur + 1) / 2, --Mask) {
3376 if (Cur & 1)
3377 ExpectedParityMask |= (1ll << Mask);
3378 }
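// Worked trace of the loop above (illustrative): for VecSize == 6,
// NumLevels == 3 and Cur goes 6 -> 3 -> 2, so only the Cur == 3 step is odd
// and ExpectedParityMask == 0b010. Walking the shuffles bottom-up,
// ShuffleMaskHalf is 1, then 2 (with the parity bit subtracted when checking
// the mask), then 3, matching the "1 | ...", "1 2 | ..." and "3 4 5 | ..."
// masks in the function comment.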
3379
3380 InstWorklist.push(VecOpEE);
3381
3382 while (!InstWorklist.empty()) {
3383 Value *CI = InstWorklist.front();
3384 InstWorklist.pop();
3385
3386 if (auto *II = dyn_cast<IntrinsicInst>(CI)) {
3387 if (!ShouldBeCallOrBinInst)
3388 return false;
3389
3390 if (!IsFirstCallOrBinInst &&
3391 any_of(PrevVecV, [](Value *VecV) { return VecV == nullptr; }))
3392 return false;
3393
3394 // For the first found call/bin op, the vector has to come from the
3395 // extract element op.
3396 if (II != (IsFirstCallOrBinInst ? VecOpEE : PrevVecV[0]))
3397 return false;
3398 IsFirstCallOrBinInst = false;
3399
3400 if (!CommonCallOp)
3401 CommonCallOp = II->getIntrinsicID();
3402 if (II->getIntrinsicID() != *CommonCallOp)
3403 return false;
3404
3405 switch (II->getIntrinsicID()) {
3406 case Intrinsic::umin:
3407 case Intrinsic::umax:
3408 case Intrinsic::smin:
3409 case Intrinsic::smax: {
3410 auto *Op0 = II->getOperand(0);
3411 auto *Op1 = II->getOperand(1);
3412 PrevVecV[0] = Op0;
3413 PrevVecV[1] = Op1;
3414 break;
3415 }
3416 default:
3417 return false;
3418 }
3419 ShouldBeCallOrBinInst ^= 1;
3420
3421 IntrinsicCostAttributes ICA(
3422 *CommonCallOp, II->getType(),
3423 {PrevVecV[0]->getType(), PrevVecV[1]->getType()});
3424 OrigCost += TTI.getIntrinsicInstrCost(ICA, CostKind);
3425
3426 // We may need a swap here since it can be (a, b) or (b, a)
3427 // and accordingly change as we go up.
3428 if (!isa<ShuffleVectorInst>(PrevVecV[1]))
3429 std::swap(PrevVecV[0], PrevVecV[1]);
3430 InstWorklist.push(PrevVecV[1]);
3431 InstWorklist.push(PrevVecV[0]);
3432 } else if (auto *BinOp = dyn_cast<BinaryOperator>(CI)) {
3433 // Similar logic for bin ops.
3434
3435 if (!ShouldBeCallOrBinInst)
3436 return false;
3437
3438 if (!IsFirstCallOrBinInst &&
3439 any_of(PrevVecV, [](Value *VecV) { return VecV == nullptr; }))
3440 return false;
3441
3442 if (BinOp != (IsFirstCallOrBinInst ? VecOpEE : PrevVecV[0]))
3443 return false;
3444 IsFirstCallOrBinInst = false;
3445
3446 if (!CommonBinOp)
3447 CommonBinOp = BinOp->getOpcode();
3448
3449 if (BinOp->getOpcode() != *CommonBinOp)
3450 return false;
3451
3452 switch (*CommonBinOp) {
3453 case BinaryOperator::Add:
3454 case BinaryOperator::Mul:
3455 case BinaryOperator::Or:
3456 case BinaryOperator::And:
3457 case BinaryOperator::Xor: {
3458 auto *Op0 = BinOp->getOperand(0);
3459 auto *Op1 = BinOp->getOperand(1);
3460 PrevVecV[0] = Op0;
3461 PrevVecV[1] = Op1;
3462 break;
3463 }
3464 default:
3465 return false;
3466 }
3467 ShouldBeCallOrBinInst ^= 1;
3468
3469 OrigCost +=
3470 TTI.getArithmeticInstrCost(*CommonBinOp, BinOp->getType(), CostKind);
3471
3472 if (!isa<ShuffleVectorInst>(PrevVecV[1]))
3473 std::swap(PrevVecV[0], PrevVecV[1]);
3474 InstWorklist.push(PrevVecV[1]);
3475 InstWorklist.push(PrevVecV[0]);
3476 } else if (auto *SVInst = dyn_cast<ShuffleVectorInst>(CI)) {
3477 // We shouldn't have any null values in the previous vectors;
3478 // if so, there was a mismatch in the pattern.
3479 if (ShouldBeCallOrBinInst ||
3480 any_of(PrevVecV, [](Value *VecV) { return VecV == nullptr; }))
3481 return false;
3482
3483 if (SVInst != PrevVecV[1])
3484 return false;
3485
3486 ArrayRef<int> CurMask;
3487 if (!match(SVInst, m_Shuffle(m_Specific(PrevVecV[0]), m_Poison(),
3488 m_Mask(CurMask))))
3489 return false;
3490
3491 // Subtract the parity mask when checking the condition.
3492 for (int Mask = 0, MaskSize = CurMask.size(); Mask != MaskSize; ++Mask) {
3493 if (Mask < ShuffleMaskHalf &&
3494 CurMask[Mask] != ShuffleMaskHalf + Mask - (ExpectedParityMask & 1))
3495 return false;
3496 if (Mask >= ShuffleMaskHalf && CurMask[Mask] != -1)
3497 return false;
3498 }
3499
3500 // Update mask values.
3501 ShuffleMaskHalf *= 2;
3502 ShuffleMaskHalf -= (ExpectedParityMask & 1);
3503 ExpectedParityMask >>= 1;
3504
3505 OrigCost += TTI.getShuffleCost(TTI::SK_PermuteSingleSrc,
3506 SVInst->getType(), SVInst->getType(),
3507 CurMask, CostKind);
3508
3509 VisitedCnt += 1;
3510 if (!ExpectedParityMask && VisitedCnt == NumLevels)
3511 break;
3512
3513 ShouldBeCallOrBinInst ^= 1;
3514 } else {
3515 return false;
3516 }
3517 }
3518
3519 // Pattern should end with a shuffle op.
3520 if (ShouldBeCallOrBinInst)
3521 return false;
3522
3523 assert(VecSize != -1 && "Expected Match for Vector Size");
3524
3525 Value *FinalVecV = PrevVecV[0];
3526 if (!FinalVecV)
3527 return false;
3528
3529 auto *FinalVecVTy = cast<FixedVectorType>(FinalVecV->getType());
3530
3531 Intrinsic::ID ReducedOp =
3532 (CommonCallOp ? getMinMaxReductionIntrinsicID(*CommonCallOp)
3533 : getReductionForBinop(*CommonBinOp));
3534 if (!ReducedOp)
3535 return false;
3536
3537 IntrinsicCostAttributes ICA(ReducedOp, FinalVecVTy, {FinalVecV});
3538 InstructionCost NewCost = TTI.getIntrinsicInstrCost(ICA, CostKind);
3539
3540 if (NewCost >= OrigCost)
3541 return false;
3542
3543 auto *ReducedResult =
3544 Builder.CreateIntrinsic(ReducedOp, {FinalVecV->getType()}, {FinalVecV});
3545 replaceValue(I, *ReducedResult);
3546
3547 return true;
3548}
3549
3550 /// Determine if it's more efficient to fold:
3551/// reduce(trunc(x)) -> trunc(reduce(x)).
3552/// reduce(sext(x)) -> sext(reduce(x)).
3553/// reduce(zext(x)) -> zext(reduce(x)).
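/// A hypothetical instance of the trunc case (value names invented for this
/// sketch):
///   %t = trunc <8 x i32> %x to <8 x i8>
///   %r = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %t)
/// can, when the cost model agrees, become
///   %w = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %x)
///   %r = trunc i32 %w to i8
/// because truncation commutes with modular addition; the zext/sext forms are
/// only attempted for the bitwise reductions handled below.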
3554bool VectorCombine::foldCastFromReductions(Instruction &I) {
3555 auto *II = dyn_cast<IntrinsicInst>(&I);
3556 if (!II)
3557 return false;
3558
3559 bool TruncOnly = false;
3560 Intrinsic::ID IID = II->getIntrinsicID();
3561 switch (IID) {
3562 case Intrinsic::vector_reduce_add:
3563 case Intrinsic::vector_reduce_mul:
3564 TruncOnly = true;
3565 break;
3566 case Intrinsic::vector_reduce_and:
3567 case Intrinsic::vector_reduce_or:
3568 case Intrinsic::vector_reduce_xor:
3569 break;
3570 default:
3571 return false;
3572 }
3573
3574 unsigned ReductionOpc = getArithmeticReductionInstruction(IID);
3575 Value *ReductionSrc = I.getOperand(0);
3576
3577 Value *Src;
3578 if (!match(ReductionSrc, m_OneUse(m_Trunc(m_Value(Src)))) &&
3579 (TruncOnly || !match(ReductionSrc, m_OneUse(m_ZExtOrSExt(m_Value(Src))))))
3580 return false;
3581
3582 auto CastOpc =
3583 (Instruction::CastOps)cast<Instruction>(ReductionSrc)->getOpcode();
3584
3585 auto *SrcTy = cast<VectorType>(Src->getType());
3586 auto *ReductionSrcTy = cast<VectorType>(ReductionSrc->getType());
3587 Type *ResultTy = I.getType();
3588
3589 InstructionCost OldCost = TTI.getArithmeticReductionCost(
3590 ReductionOpc, ReductionSrcTy, std::nullopt, CostKind);
3591 OldCost += TTI.getCastInstrCost(CastOpc, ReductionSrcTy, SrcTy,
3592 TTI::CastContextHint::None, CostKind,
3593 cast<CastInst>(ReductionSrc));
3594 InstructionCost NewCost =
3595 TTI.getArithmeticReductionCost(ReductionOpc, SrcTy, std::nullopt,
3596 CostKind) +
3597 TTI.getCastInstrCost(CastOpc, ResultTy, ReductionSrcTy->getScalarType(),
3598 TTI::CastContextHint::None, CostKind);
3599
3600 if (OldCost <= NewCost || !NewCost.isValid())
3601 return false;
3602
3603 Value *NewReduction = Builder.CreateIntrinsic(SrcTy->getScalarType(),
3604 II->getIntrinsicID(), {Src});
3605 Value *NewCast = Builder.CreateCast(CastOpc, NewReduction, ResultTy);
3606 replaceValue(I, *NewCast);
3607 return true;
3608}
3609
3610/// Returns true if this ShuffleVectorInst eventually feeds into a
3611/// vector reduction intrinsic (e.g., vector_reduce_add) by only following
3612/// chains of shuffles and binary operators (in any combination/order).
3613 /// The search is bounded by a fixed limit on the number of visited users.
3614 static bool feedsIntoVectorReduction(ShuffleVectorInst *SVI) {
3615 constexpr unsigned MaxVisited = 32;
3616 SmallPtrSet<Instruction *, 8> Visited;
3617 SmallVector<Instruction *, 4> WorkList;
3618 bool FoundReduction = false;
3619
3620 WorkList.push_back(SVI);
3621 while (!WorkList.empty()) {
3622 Instruction *I = WorkList.pop_back_val();
3623 for (User *U : I->users()) {
3624 auto *UI = cast<Instruction>(U);
3625 if (!UI || !Visited.insert(UI).second)
3626 continue;
3627 if (Visited.size() > MaxVisited)
3628 return false;
3629 if (auto *II = dyn_cast<IntrinsicInst>(UI)) {
3630 // More than one reduction reached
3631 if (FoundReduction)
3632 return false;
3633 switch (II->getIntrinsicID()) {
3634 case Intrinsic::vector_reduce_add:
3635 case Intrinsic::vector_reduce_mul:
3636 case Intrinsic::vector_reduce_and:
3637 case Intrinsic::vector_reduce_or:
3638 case Intrinsic::vector_reduce_xor:
3639 case Intrinsic::vector_reduce_smin:
3640 case Intrinsic::vector_reduce_smax:
3641 case Intrinsic::vector_reduce_umin:
3642 case Intrinsic::vector_reduce_umax:
3643 FoundReduction = true;
3644 continue;
3645 default:
3646 return false;
3647 }
3648 }
3649
3650 if (!isa<BinaryOperator, ShuffleVectorInst>(UI))
3651 return false;
3652
3653 WorkList.emplace_back(UI);
3654 }
3655 }
3656 return FoundReduction;
3657}
3658
3659/// This method looks for groups of shuffles acting on binops, of the form:
3660/// %x = shuffle ...
3661/// %y = shuffle ...
3662/// %a = binop %x, %y
3663/// %b = binop %x, %y
3664/// shuffle %a, %b, selectmask
3665/// We may, especially if the shuffle is wider than legal, be able to convert
3666/// the shuffle to a form where only parts of a and b need to be computed. On
3667/// architectures with no obvious "select" shuffle, this can reduce the total
3668/// number of operations if the target reports them as cheaper.
3669bool VectorCombine::foldSelectShuffle(Instruction &I, bool FromReduction) {
3670 auto *SVI = cast<ShuffleVectorInst>(&I);
3671 auto *VT = cast<FixedVectorType>(I.getType());
3672 auto *Op0 = dyn_cast<Instruction>(SVI->getOperand(0));
3673 auto *Op1 = dyn_cast<Instruction>(SVI->getOperand(1));
3674 if (!Op0 || !Op1 || Op0 == Op1 || !Op0->isBinaryOp() || !Op1->isBinaryOp() ||
3675 VT != Op0->getType())
3676 return false;
3677
3678 auto *SVI0A = dyn_cast<Instruction>(Op0->getOperand(0));
3679 auto *SVI0B = dyn_cast<Instruction>(Op0->getOperand(1));
3680 auto *SVI1A = dyn_cast<Instruction>(Op1->getOperand(0));
3681 auto *SVI1B = dyn_cast<Instruction>(Op1->getOperand(1));
3682 SmallPtrSet<Instruction *, 4> InputShuffles({SVI0A, SVI0B, SVI1A, SVI1B});
3683 auto checkSVNonOpUses = [&](Instruction *I) {
3684 if (!I || I->getOperand(0)->getType() != VT)
3685 return true;
3686 return any_of(I->users(), [&](User *U) {
3687 return U != Op0 && U != Op1 &&
3688 !(isa<ShuffleVectorInst>(U) &&
3689 (InputShuffles.contains(cast<Instruction>(U)) ||
3690 isInstructionTriviallyDead(cast<Instruction>(U))));
3691 });
3692 };
3693 if (checkSVNonOpUses(SVI0A) || checkSVNonOpUses(SVI0B) ||
3694 checkSVNonOpUses(SVI1A) || checkSVNonOpUses(SVI1B))
3695 return false;
3696
3697 // Collect all the uses that are shuffles that we can transform together. We
3698 // may not have a single shuffle, but a group that can all be transformed
3699 // together profitably.
3700 SmallVector<ShuffleVectorInst *> Shuffles;
3701 auto collectShuffles = [&](Instruction *I) {
3702 for (auto *U : I->users()) {
3703 auto *SV = dyn_cast<ShuffleVectorInst>(U);
3704 if (!SV || SV->getType() != VT)
3705 return false;
3706 if ((SV->getOperand(0) != Op0 && SV->getOperand(0) != Op1) ||
3707 (SV->getOperand(1) != Op0 && SV->getOperand(1) != Op1))
3708 return false;
3709 if (!llvm::is_contained(Shuffles, SV))
3710 Shuffles.push_back(SV);
3711 }
3712 return true;
3713 };
3714 if (!collectShuffles(Op0) || !collectShuffles(Op1))
3715 return false;
3716 // From a reduction, we need to be processing a single shuffle, otherwise the
3717 // other uses will not be lane-invariant.
3718 if (FromReduction && Shuffles.size() > 1)
3719 return false;
3720
3721 // Add any shuffle uses for the shuffles we have found, to include them in our
3722 // cost calculations.
3723 if (!FromReduction) {
3724 for (ShuffleVectorInst *SV : Shuffles) {
3725 for (auto *U : SV->users()) {
3726 ShuffleVectorInst *SSV = dyn_cast<ShuffleVectorInst>(U);
3727 if (SSV && isa<UndefValue>(SSV->getOperand(1)) && SSV->getType() == VT)
3728 Shuffles.push_back(SSV);
3729 }
3730 }
3731 }
3732
3733 // For each of the output shuffles, we try to sort all the first vector
3734 // elements to the beginning, followed by the second array elements at the
3735 // end. If the binops are legalized to smaller vectors, this may reduce total
3736 // number of binops. We compute the ReconstructMask mask needed to convert
3737 // back to the original lane order.
3738 SmallVector<std::pair<int, int>> V1, V2;
3739 SmallVector<SmallVector<int>> OrigReconstructMasks;
3740 int MaxV1Elt = 0, MaxV2Elt = 0;
3741 unsigned NumElts = VT->getNumElements();
3742 for (ShuffleVectorInst *SVN : Shuffles) {
3743 SmallVector<int> Mask;
3744 SVN->getShuffleMask(Mask);
3745
3746 // Check the operands are the same as the original, or reversed (in which
3747 // case we need to commute the mask).
3748 Value *SVOp0 = SVN->getOperand(0);
3749 Value *SVOp1 = SVN->getOperand(1);
3750 if (isa<UndefValue>(SVOp1)) {
3751 auto *SSV = cast<ShuffleVectorInst>(SVOp0);
3752 SVOp0 = SSV->getOperand(0);
3753 SVOp1 = SSV->getOperand(1);
3754 for (int &Elem : Mask) {
3755 if (Elem >= static_cast<int>(SSV->getShuffleMask().size()))
3756 return false;
3757 Elem = Elem < 0 ? Elem : SSV->getMaskValue(Elem);
3758 }
3759 }
3760 if (SVOp0 == Op1 && SVOp1 == Op0) {
3761 std::swap(SVOp0, SVOp1);
3762 ShuffleVectorInst::commuteShuffleMask(Mask, NumElts);
3763 }
3764 if (SVOp0 != Op0 || SVOp1 != Op1)
3765 return false;
3766
3767 // Calculate the reconstruction mask for this shuffle, as the mask needed to
3768 // take the packed values from Op0/Op1 and reconstruct the original lane
3769 // order.
3770 SmallVector<int> ReconstructMask;
3771 for (unsigned I = 0; I < Mask.size(); I++) {
3772 if (Mask[I] < 0) {
3773 ReconstructMask.push_back(-1);
3774 } else if (Mask[I] < static_cast<int>(NumElts)) {
3775 MaxV1Elt = std::max(MaxV1Elt, Mask[I]);
3776 auto It = find_if(V1, [&](const std::pair<int, int> &A) {
3777 return Mask[I] == A.first;
3778 });
3779 if (It != V1.end())
3780 ReconstructMask.push_back(It - V1.begin());
3781 else {
3782 ReconstructMask.push_back(V1.size());
3783 V1.emplace_back(Mask[I], V1.size());
3784 }
3785 } else {
3786 MaxV2Elt = std::max<int>(MaxV2Elt, Mask[I] - NumElts);
3787 auto It = find_if(V2, [&](const std::pair<int, int> &A) {
3788 return Mask[I] - static_cast<int>(NumElts) == A.first;
3789 });
3790 if (It != V2.end())
3791 ReconstructMask.push_back(NumElts + It - V2.begin());
3792 else {
3793 ReconstructMask.push_back(NumElts + V2.size());
3794 V2.emplace_back(Mask[I] - NumElts, NumElts + V2.size());
3795 }
3796 }
3797 }
3798
3799 // For reductions, we know that the output lane ordering doesn't alter the
3800 // result. Sorting the mask in order can help simplify the shuffle away.
3801 if (FromReduction)
3802 sort(ReconstructMask);
3803 OrigReconstructMasks.push_back(std::move(ReconstructMask));
3804 }
3805
3806 // If the maximum elements used from V1 and V2 are not larger than the new
3807 // vectors, the vectors are already packed and performing the optimization
3808 // again will likely not help any further. This also prevents us from getting
3809 // stuck in a cycle in case the costs do not also rule it out.
3810 if (V1.empty() || V2.empty() ||
3811 (MaxV1Elt == static_cast<int>(V1.size()) - 1 &&
3812 MaxV2Elt == static_cast<int>(V2.size()) - 1))
3813 return false;
3814
3815 // GetBaseMaskValue takes one of the inputs, which may either be a shuffle, a
3816 // shuffle of another shuffle, or not a shuffle (which is treated like an
3817 // identity shuffle).
3818 auto GetBaseMaskValue = [&](Instruction *I, int M) {
3819 auto *SV = dyn_cast<ShuffleVectorInst>(I);
3820 if (!SV)
3821 return M;
3822 if (isa<UndefValue>(SV->getOperand(1)))
3823 if (auto *SSV = dyn_cast<ShuffleVectorInst>(SV->getOperand(0)))
3824 if (InputShuffles.contains(SSV))
3825 return SSV->getMaskValue(SV->getMaskValue(M));
3826 return SV->getMaskValue(M);
3827 };
3828
3829 // Attempt to sort the inputs by ascending mask values to make simpler input
3830 // shuffles and push complex shuffles down to the uses. We sort on the first
3831 // of the two input shuffle orders, to try and get at least one input into a
3832 // nice order.
3833 auto SortBase = [&](Instruction *A, std::pair<int, int> X,
3834 std::pair<int, int> Y) {
3835 int MXA = GetBaseMaskValue(A, X.first);
3836 int MYA = GetBaseMaskValue(A, Y.first);
3837 return MXA < MYA;
3838 };
3839 stable_sort(V1, [&](std::pair<int, int> A, std::pair<int, int> B) {
3840 return SortBase(SVI0A, A, B);
3841 });
3842 stable_sort(V2, [&](std::pair<int, int> A, std::pair<int, int> B) {
3843 return SortBase(SVI1A, A, B);
3844 });
3845 // Calculate our ReconstructMasks from the OrigReconstructMasks and the
3846 // modified order of the input shuffles.
3847 SmallVector<SmallVector<int>> ReconstructMasks;
3848 for (const auto &Mask : OrigReconstructMasks) {
3849 SmallVector<int> ReconstructMask;
3850 for (int M : Mask) {
3851 auto FindIndex = [](const SmallVector<std::pair<int, int>> &V, int M) {
3852 auto It = find_if(V, [M](auto A) { return A.second == M; });
3853 assert(It != V.end() && "Expected all entries in Mask");
3854 return std::distance(V.begin(), It);
3855 };
3856 if (M < 0)
3857 ReconstructMask.push_back(-1);
3858 else if (M < static_cast<int>(NumElts)) {
3859 ReconstructMask.push_back(FindIndex(V1, M));
3860 } else {
3861 ReconstructMask.push_back(NumElts + FindIndex(V2, M));
3862 }
3863 }
3864 ReconstructMasks.push_back(std::move(ReconstructMask));
3865 }
3866
3867 // Calculate the masks needed for the new input shuffles, which get padded
3868 // with undef
3869 SmallVector<int> V1A, V1B, V2A, V2B;
3870 for (unsigned I = 0; I < V1.size(); I++) {
3871 V1A.push_back(GetBaseMaskValue(SVI0A, V1[I].first));
3872 V1B.push_back(GetBaseMaskValue(SVI0B, V1[I].first));
3873 }
3874 for (unsigned I = 0; I < V2.size(); I++) {
3875 V2A.push_back(GetBaseMaskValue(SVI1A, V2[I].first));
3876 V2B.push_back(GetBaseMaskValue(SVI1B, V2[I].first));
3877 }
3878 while (V1A.size() < NumElts) {
3879 V1A.push_back(PoisonMaskElem);
3880 V1B.push_back(PoisonMaskElem);
3881 }
3882 while (V2A.size() < NumElts) {
3883 V2A.push_back(PoisonMaskElem);
3884 V2B.push_back(PoisonMaskElem);
3885 }
3886
3887 auto AddShuffleCost = [&](InstructionCost C, Instruction *I) {
3888 auto *SV = dyn_cast<ShuffleVectorInst>(I);
3889 if (!SV)
3890 return C;
3891 return C + TTI.getShuffleCost(isa<UndefValue>(SV->getOperand(1))
3892 ? TTI::SK_PermuteSingleSrc
3893 : TTI::SK_PermuteTwoSrc,
3894 VT, VT, SV->getShuffleMask(), CostKind);
3895 };
3896 auto AddShuffleMaskCost = [&](InstructionCost C, ArrayRef<int> Mask) {
3897 return C +
3898 TTI.getShuffleCost(TTI::SK_PermuteTwoSrc, VT, VT, Mask, CostKind);
3899 };
3900
3901 unsigned ElementSize = VT->getElementType()->getPrimitiveSizeInBits();
3902 unsigned MaxVectorSize =
3903 TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector).getFixedValue();
3904 unsigned MaxElementsInVector = MaxVectorSize / ElementSize;
3905 if (MaxElementsInVector == 0)
3906 return false;
3907 // When there are multiple shufflevector operations on the same input,
3908 // especially when the vector length is larger than the register size,
3909 // identical shuffle patterns may occur across different groups of elements.
3910 // To avoid overestimating the cost by counting these repeated shuffles more
3911 // than once, we only account for unique shuffle patterns. This adjustment
3912 // prevents inflated costs in the cost model for wide vectors split into
3913 // several register-sized groups.
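// For example (an illustrative calculation): with a 16-element mask and
// MaxElementsInVector == 4, the mask splits into 4 groups of 4 lanes; if only
// 2 distinct per-group patterns occur, the full-vector shuffle cost computed
// below is scaled by 2/4.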
3914 std::set<SmallVector<int, 4>> UniqueShuffles;
3915 auto AddShuffleMaskAdjustedCost = [&](InstructionCost C, ArrayRef<int> Mask) {
3916 // Compute the cost for performing the shuffle over the full vector.
3917 auto ShuffleCost =
3918 TTI.getShuffleCost(TTI::SK_PermuteTwoSrc, VT, VT, Mask, CostKind);
3919 unsigned NumFullVectors = Mask.size() / MaxElementsInVector;
3920 if (NumFullVectors < 2)
3921 return C + ShuffleCost;
3922 SmallVector<int, 4> SubShuffle(MaxElementsInVector);
3923 unsigned NumUniqueGroups = 0;
3924 unsigned NumGroups = Mask.size() / MaxElementsInVector;
3925 // For each group of MaxElementsInVector contiguous elements,
3926 // collect their shuffle pattern and insert into the set of unique patterns.
3927 for (unsigned I = 0; I < NumFullVectors; ++I) {
3928 for (unsigned J = 0; J < MaxElementsInVector; ++J)
3929 SubShuffle[J] = Mask[MaxElementsInVector * I + J];
3930 if (UniqueShuffles.insert(SubShuffle).second)
3931 NumUniqueGroups += 1;
3932 }
3933 return C + ShuffleCost * NumUniqueGroups / NumGroups;
3934 };
3935 auto AddShuffleAdjustedCost = [&](InstructionCost C, Instruction *I) {
3936 auto *SV = dyn_cast<ShuffleVectorInst>(I);
3937 if (!SV)
3938 return C;
3939 SmallVector<int, 16> Mask;
3940 SV->getShuffleMask(Mask);
3941 return AddShuffleMaskAdjustedCost(C, Mask);
3942 };
3943 // Check that input consists of ShuffleVectors applied to the same input
3944 auto AllShufflesHaveSameOperands =
3945 [](SmallPtrSetImpl<Instruction *> &InputShuffles) {
3946 if (InputShuffles.size() < 2)
3947 return false;
3948 ShuffleVectorInst *FirstSV =
3949 dyn_cast<ShuffleVectorInst>(*InputShuffles.begin());
3950 if (!FirstSV)
3951 return false;
3952
3953 Value *In0 = FirstSV->getOperand(0), *In1 = FirstSV->getOperand(1);
3954 return std::all_of(
3955 std::next(InputShuffles.begin()), InputShuffles.end(),
3956 [&](Instruction *I) {
3957 ShuffleVectorInst *SV = dyn_cast<ShuffleVectorInst>(I);
3958 return SV && SV->getOperand(0) == In0 && SV->getOperand(1) == In1;
3959 });
3960 };
3961
3962 // Get the costs of the shuffles + binops before and after with the new
3963 // shuffle masks.
3964 InstructionCost CostBefore =
3965 TTI.getArithmeticInstrCost(Op0->getOpcode(), VT, CostKind) +
3966 TTI.getArithmeticInstrCost(Op1->getOpcode(), VT, CostKind);
3967 CostBefore += std::accumulate(Shuffles.begin(), Shuffles.end(),
3968 InstructionCost(0), AddShuffleCost);
3969 if (AllShufflesHaveSameOperands(InputShuffles)) {
3970 UniqueShuffles.clear();
3971 CostBefore += std::accumulate(InputShuffles.begin(), InputShuffles.end(),
3972 InstructionCost(0), AddShuffleAdjustedCost);
3973 } else {
3974 CostBefore += std::accumulate(InputShuffles.begin(), InputShuffles.end(),
3975 InstructionCost(0), AddShuffleCost);
3976 }
3977
3978 // The new binops will be unused for lanes past the used shuffle lengths.
3979 // These types attempt to get the correct cost for that from the target.
3980 FixedVectorType *Op0SmallVT =
3981 FixedVectorType::get(VT->getScalarType(), V1.size());
3982 FixedVectorType *Op1SmallVT =
3983 FixedVectorType::get(VT->getScalarType(), V2.size());
3984 InstructionCost CostAfter =
3985 TTI.getArithmeticInstrCost(Op0->getOpcode(), Op0SmallVT, CostKind) +
3986 TTI.getArithmeticInstrCost(Op1->getOpcode(), Op1SmallVT, CostKind);
3987 UniqueShuffles.clear();
3988 CostAfter += std::accumulate(ReconstructMasks.begin(), ReconstructMasks.end(),
3989 InstructionCost(0), AddShuffleMaskAdjustedCost);
3990 std::set<SmallVector<int>> OutputShuffleMasks({V1A, V1B, V2A, V2B});
3991 CostAfter +=
3992 std::accumulate(OutputShuffleMasks.begin(), OutputShuffleMasks.end(),
3993 InstructionCost(0), AddShuffleMaskCost);
3994
3995 LLVM_DEBUG(dbgs() << "Found a binop select shuffle pattern: " << I << "\n");
3996 LLVM_DEBUG(dbgs() << " CostBefore: " << CostBefore
3997 << " vs CostAfter: " << CostAfter << "\n");
3998 if (CostBefore < CostAfter ||
3999 (CostBefore == CostAfter && !feedsIntoVectorReduction(SVI)))
4000 return false;
4001
4002 // The cost model has passed, create the new instructions.
4003 auto GetShuffleOperand = [&](Instruction *I, unsigned Op) -> Value * {
4004 auto *SV = dyn_cast<ShuffleVectorInst>(I);
4005 if (!SV)
4006 return I;
4007 if (isa<UndefValue>(SV->getOperand(1)))
4008 if (auto *SSV = dyn_cast<ShuffleVectorInst>(SV->getOperand(0)))
4009 if (InputShuffles.contains(SSV))
4010 return SSV->getOperand(Op);
4011 return SV->getOperand(Op);
4012 };
4013 Builder.SetInsertPoint(*SVI0A->getInsertionPointAfterDef());
4014 Value *NSV0A = Builder.CreateShuffleVector(GetShuffleOperand(SVI0A, 0),
4015 GetShuffleOperand(SVI0A, 1), V1A);
4016 Builder.SetInsertPoint(*SVI0B->getInsertionPointAfterDef());
4017 Value *NSV0B = Builder.CreateShuffleVector(GetShuffleOperand(SVI0B, 0),
4018 GetShuffleOperand(SVI0B, 1), V1B);
4019 Builder.SetInsertPoint(*SVI1A->getInsertionPointAfterDef());
4020 Value *NSV1A = Builder.CreateShuffleVector(GetShuffleOperand(SVI1A, 0),
4021 GetShuffleOperand(SVI1A, 1), V2A);
4022 Builder.SetInsertPoint(*SVI1B->getInsertionPointAfterDef());
4023 Value *NSV1B = Builder.CreateShuffleVector(GetShuffleOperand(SVI1B, 0),
4024 GetShuffleOperand(SVI1B, 1), V2B);
4025 Builder.SetInsertPoint(Op0);
4026 Value *NOp0 = Builder.CreateBinOp((Instruction::BinaryOps)Op0->getOpcode(),
4027 NSV0A, NSV0B);
4028 if (auto *I = dyn_cast<Instruction>(NOp0))
4029 I->copyIRFlags(Op0, true);
4030 Builder.SetInsertPoint(Op1);
4031 Value *NOp1 = Builder.CreateBinOp((Instruction::BinaryOps)Op1->getOpcode(),
4032 NSV1A, NSV1B);
4033 if (auto *I = dyn_cast<Instruction>(NOp1))
4034 I->copyIRFlags(Op1, true);
4035
4036 for (int S = 0, E = ReconstructMasks.size(); S != E; S++) {
4037 Builder.SetInsertPoint(Shuffles[S]);
4038 Value *NSV = Builder.CreateShuffleVector(NOp0, NOp1, ReconstructMasks[S]);
4039 replaceValue(*Shuffles[S], *NSV, false);
4040 }
4041
4042 Worklist.pushValue(NSV0A);
4043 Worklist.pushValue(NSV0B);
4044 Worklist.pushValue(NSV1A);
4045 Worklist.pushValue(NSV1B);
4046 return true;
4047}
4048
4049 /// Check if the instruction depends on a ZExt that can be moved after the
4050 /// instruction. Move the ZExt if it is profitable. For example:
4051 /// logic(zext(x),y) -> zext(logic(x,trunc(y)))
4052 /// lshr(zext(x),y) -> zext(lshr(x,trunc(y)))
4053 /// The cost model calculation takes into account whether zext(x) has other
4054 /// users and whether the zext can be propagated through them too.
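/// A hypothetical instance of the bitwise-and case (value names invented for
/// this sketch):
///   %zx = zext <8 x i16> %x to <8 x i32>
///   %r  = and <8 x i32> %zx, %y
/// can, when the cost model agrees, be rewritten as
///   %ty = trunc <8 x i32> %y to <8 x i16>
///   %na = and <8 x i16> %x, %ty
///   %r  = zext <8 x i16> %na to <8 x i32>
/// The known-bits checks below ensure the wide result (or the shift amount,
/// for lshr) fits in the narrow type, so the rewrite preserves the value.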
4055bool VectorCombine::shrinkType(Instruction &I) {
4056 Value *ZExted, *OtherOperand;
4057 if (!match(&I, m_c_BitwiseLogic(m_ZExt(m_Value(ZExted)),
4058 m_Value(OtherOperand))) &&
4059 !match(&I, m_LShr(m_ZExt(m_Value(ZExted)), m_Value(OtherOperand))))
4060 return false;
4061
4062 Value *ZExtOperand = I.getOperand(I.getOperand(0) == OtherOperand ? 1 : 0);
4063
4064 auto *BigTy = cast<FixedVectorType>(I.getType());
4065 auto *SmallTy = cast<FixedVectorType>(ZExted->getType());
4066 unsigned BW = SmallTy->getElementType()->getPrimitiveSizeInBits();
4067
4068 if (I.getOpcode() == Instruction::LShr) {
4069 // Check that the shift amount is less than the number of bits in the
4070 // smaller type. Otherwise, the smaller lshr will return a poison value.
4071 KnownBits ShAmtKB = computeKnownBits(I.getOperand(1), *DL);
4072 if (ShAmtKB.getMaxValue().uge(BW))
4073 return false;
4074 } else {
4075 // Check that the expression overall uses at most the same number of bits as
4076 // ZExted
4077 KnownBits KB = computeKnownBits(&I, *DL);
4078 if (KB.countMaxActiveBits() > BW)
4079 return false;
4080 }
4081
4082 // Calculate costs of leaving current IR as it is and moving ZExt operation
4083 // later, along with adding truncates if needed
4084 InstructionCost ZExtCost = TTI.getCastInstrCost(
4085 Instruction::ZExt, BigTy, SmallTy,
4086 TargetTransformInfo::CastContextHint::None, CostKind);
4087 InstructionCost CurrentCost = ZExtCost;
4088 InstructionCost ShrinkCost = 0;
4089
4090 // Calculate total cost and check that we can propagate through all ZExt users
4091 for (User *U : ZExtOperand->users()) {
4092 auto *UI = cast<Instruction>(U);
4093 if (UI == &I) {
4094 CurrentCost +=
4095 TTI.getArithmeticInstrCost(UI->getOpcode(), BigTy, CostKind);
4096 ShrinkCost +=
4097 TTI.getArithmeticInstrCost(UI->getOpcode(), SmallTy, CostKind);
4098 ShrinkCost += ZExtCost;
4099 continue;
4100 }
4101
4102 if (!Instruction::isBinaryOp(UI->getOpcode()))
4103 return false;
4104
4105 // Check if we can propagate ZExt through its other users
4106 KnownBits KB = computeKnownBits(UI, *DL);
4107 if (KB.countMaxActiveBits() > BW)
4108 return false;
4109
4110 CurrentCost += TTI.getArithmeticInstrCost(UI->getOpcode(), BigTy, CostKind);
4111 ShrinkCost +=
4112 TTI.getArithmeticInstrCost(UI->getOpcode(), SmallTy, CostKind);
4113 ShrinkCost += ZExtCost;
4114 }
4115
4116 // If the other instruction operand is not a constant, we'll need to
4117 // generate a truncate instruction. So we have to adjust cost
4118 if (!isa<Constant>(OtherOperand))
4119 ShrinkCost += TTI.getCastInstrCost(
4120 Instruction::Trunc, SmallTy, BigTy,
4121 TargetTransformInfo::CastContextHint::None, CostKind);
4122
4123 // If the cost of shrinking types and leaving the IR is the same, we'll lean
4124 // towards modifying the IR because shrinking opens opportunities for other
4125 // shrinking optimisations.
4126 if (ShrinkCost > CurrentCost)
4127 return false;
4128
4129 Builder.SetInsertPoint(&I);
4130 Value *Op0 = ZExted;
4131 Value *Op1 = Builder.CreateTrunc(OtherOperand, SmallTy);
4132 // Keep the order of operands the same
4133 if (I.getOperand(0) == OtherOperand)
4134 std::swap(Op0, Op1);
4135 Value *NewBinOp =
4136 Builder.CreateBinOp((Instruction::BinaryOps)I.getOpcode(), Op0, Op1);
4137 cast<Instruction>(NewBinOp)->copyIRFlags(&I);
4138 cast<Instruction>(NewBinOp)->copyMetadata(I);
4139 Value *NewZExtr = Builder.CreateZExt(NewBinOp, BigTy);
4140 replaceValue(I, *NewZExtr);
4141 return true;
4142}
4143
4144/// insert (DstVec, (extract SrcVec, ExtIdx), InsIdx) -->
4145/// shuffle (DstVec, SrcVec, Mask)
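/// A hypothetical instance with equal element counts and no operand swap
/// (value names invented for this sketch):
///   %e = extractelement <4 x i32> %src, i64 2
///   %i = insertelement <4 x i32> %dst, i32 %e, i64 0
/// becomes
///   %i = shufflevector <4 x i32> %dst, <4 x i32> %src, <4 x i32> <i32 6, i32 1, i32 2, i32 3>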
4146bool VectorCombine::foldInsExtVectorToShuffle(Instruction &I) {
4147 Value *DstVec, *SrcVec;
4148 uint64_t ExtIdx, InsIdx;
4149 if (!match(&I,
4150 m_InsertElt(m_Value(DstVec),
4151 m_ExtractElt(m_Value(SrcVec), m_ConstantInt(ExtIdx)),
4152 m_ConstantInt(InsIdx))))
4153 return false;
4154
4155 auto *DstVecTy = dyn_cast<FixedVectorType>(I.getType());
4156 auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcVec->getType());
4157 // We can try combining vectors with different element counts, but the element types must match.
4158 if (!DstVecTy || !SrcVecTy ||
4159 SrcVecTy->getElementType() != DstVecTy->getElementType())
4160 return false;
4161
4162 unsigned NumDstElts = DstVecTy->getNumElements();
4163 unsigned NumSrcElts = SrcVecTy->getNumElements();
4164 if (InsIdx >= NumDstElts || ExtIdx >= NumSrcElts || NumDstElts == 1)
4165 return false;
4166
4167 // Insertion into poison is a cheaper single operand shuffle.
4168 TTI::ShuffleKind SK;
4169 SmallVector<int> Mask(NumDstElts, PoisonMaskElem);
4170
4171 bool NeedExpOrNarrow = NumSrcElts != NumDstElts;
4172 bool IsExtIdxInBounds = ExtIdx < NumDstElts;
4173 bool NeedDstSrcSwap = isa<PoisonValue>(DstVec) && !isa<UndefValue>(SrcVec);
4174 if (NeedDstSrcSwap) {
4175 SK = TTI::SK_PermuteSingleSrc;
4176 if (!IsExtIdxInBounds && NeedExpOrNarrow)
4177 Mask[InsIdx] = 0;
4178 else
4179 Mask[InsIdx] = ExtIdx;
4180 std::swap(DstVec, SrcVec);
4181 } else {
4182 SK = TTI::SK_PermuteTwoSrc;
4183 std::iota(Mask.begin(), Mask.end(), 0);
4184 if (!IsExtIdxInBounds && NeedExpOrNarrow)
4185 Mask[InsIdx] = NumDstElts;
4186 else
4187 Mask[InsIdx] = ExtIdx + NumDstElts;
4188 }
4189
4190 // Cost
4191 auto *Ins = cast<InsertElementInst>(&I);
4192 auto *Ext = cast<ExtractElementInst>(I.getOperand(1));
4193 InstructionCost InsCost =
4194 TTI.getVectorInstrCost(*Ins, DstVecTy, CostKind, InsIdx);
4195 InstructionCost ExtCost =
4196 TTI.getVectorInstrCost(*Ext, DstVecTy, CostKind, ExtIdx);
4197 InstructionCost OldCost = ExtCost + InsCost;
4198
4199 InstructionCost NewCost = 0;
4200 SmallVector<int> ExtToVecMask;
4201 if (!NeedExpOrNarrow) {
4202 // Ignore 'free' identity insertion shuffle.
4203 // TODO: getShuffleCost should return TCC_Free for Identity shuffles.
4204 if (!ShuffleVectorInst::isIdentityMask(Mask, NumSrcElts))
4205 NewCost += TTI.getShuffleCost(SK, DstVecTy, DstVecTy, Mask, CostKind, 0,
4206 nullptr, {DstVec, SrcVec});
4207 } else {
4208 // When creating a length-changing vector, build it with a mask that keeps
4209 // the extracted lane (ExtIdx), falling back to element 0 when ExtIdx is out
4210 // of bounds, so the element to be extracted is always present in the result.
4211 ExtToVecMask.assign(NumDstElts, PoisonMaskElem);
4212 if (IsExtIdxInBounds)
4213 ExtToVecMask[ExtIdx] = ExtIdx;
4214 else
4215 ExtToVecMask[0] = ExtIdx;
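// For instance (an illustrative case): extracting lane 5 of an <8 x i32>
// source for insertion into a <4 x i32> destination first builds a <4 x i32>
// vector with mask <5, poison, poison, poison>, and the main Mask above then
// refers to lane 0 of that length-changed vector.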
4216 // Add cost for expanding or narrowing
4217 NewCost += TTI.getShuffleCost(TTI::SK_PermuteSingleSrc,
4218 DstVecTy, SrcVecTy, ExtToVecMask, CostKind);
4219 NewCost += TTI.getShuffleCost(SK, DstVecTy, DstVecTy, Mask, CostKind);
4220 }
4221
4222 if (!Ext->hasOneUse())
4223 NewCost += ExtCost;
4224
4225 LLVM_DEBUG(dbgs() << "Found an insert/extract shuffle-like pair: " << I
4226 << "\n OldCost: " << OldCost << " vs NewCost: " << NewCost
4227 << "\n");
4228
4229 if (OldCost < NewCost)
4230 return false;
4231
4232 if (NeedExpOrNarrow) {
4233 if (!NeedDstSrcSwap)
4234 SrcVec = Builder.CreateShuffleVector(SrcVec, ExtToVecMask);
4235 else
4236 DstVec = Builder.CreateShuffleVector(DstVec, ExtToVecMask);
4237 }
4238
4239 // Canonicalize undef param to RHS to help further folds.
4240 if (isa<UndefValue>(DstVec) && !isa<UndefValue>(SrcVec)) {
4241 ShuffleVectorInst::commuteShuffleMask(Mask, NumDstElts);
4242 std::swap(DstVec, SrcVec);
4243 }
4244
4245 Value *Shuf = Builder.CreateShuffleVector(DstVec, SrcVec, Mask);
4246 replaceValue(I, *Shuf);
4247
4248 return true;
4249}
4250
4251/// If we're interleaving 2 constant splats, for instance `<vscale x 8 x i32>
4252/// <splat of 666>` and `<vscale x 8 x i32> <splat of 777>`, we can create a
4253/// larger splat `<vscale x 8 x i64> <splat of ((777 << 32) | 666)>` first
4254/// before casting it back into `<vscale x 16 x i32>`.
4255bool VectorCombine::foldInterleaveIntrinsics(Instruction &I) {
4256 const APInt *SplatVal0, *SplatVal1;
4257 if (!match(&I, m_Intrinsic<Intrinsic::vector_interleave2>(
4258 m_APInt(SplatVal0), m_APInt(SplatVal1))))
4259 return false;
4260
4261 LLVM_DEBUG(dbgs() << "VC: Folding interleave2 with two splats: " << I
4262 << "\n");
4263
4264 auto *VTy =
4265 cast<VectorType>(cast<IntrinsicInst>(I).getArgOperand(0)->getType());
4266 auto *ExtVTy = VectorType::getExtendedElementVectorType(VTy);
4267 unsigned Width = VTy->getElementType()->getIntegerBitWidth();
4268
4269 // If the costs of the interleave2 intrinsic and the bitcast are both
4270 // invalid we want to bail out, so we use <= rather than < here. Even if
4271 // they both have valid and equal costs, it's probably not a good idea to
4272 // emit a high-cost constant splat.
4273 if (TTI.getInstructionCost(&I, CostKind) <=
4274 TTI.getCastInstrCost(Instruction::BitCast, I.getType(), ExtVTy,
4275 TTI::CastContextHint::None, CostKind)) {
4276 LLVM_DEBUG(dbgs() << "VC: The cost to cast from " << *ExtVTy << " to "
4277 << *I.getType() << " is too high.\n");
4278 return false;
4279 }
4280
4281 APInt NewSplatVal = SplatVal1->zext(Width * 2);
4282 NewSplatVal <<= Width;
4283 NewSplatVal |= SplatVal0->zext(Width * 2);
4284 auto *NewSplat = ConstantVector::getSplat(
4285 ExtVTy->getElementCount(), ConstantInt::get(F.getContext(), NewSplatVal));
4286
4287 IRBuilder<> Builder(&I);
4288 replaceValue(I, *Builder.CreateBitCast(NewSplat, I.getType()));
4289 return true;
4290}
4291
4292// Attempt to shrink loads that are only used by shufflevector instructions.
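// A hypothetical instance (value names invented for this sketch): if a wide
// load feeds only shuffles that read its low half, e.g.
//   %l = load <8 x i32>, ptr %p
//   %s = shufflevector <8 x i32> %l, <8 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// the load can, when the cost model agrees, be narrowed to a <4 x i32> load
// and the shuffle rewritten over the smaller vector.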
4293bool VectorCombine::shrinkLoadForShuffles(Instruction &I) {
4294 auto *OldLoad = dyn_cast<LoadInst>(&I);
4295 if (!OldLoad || !OldLoad->isSimple())
4296 return false;
4297
4298 auto *OldLoadTy = dyn_cast<FixedVectorType>(OldLoad->getType());
4299 if (!OldLoadTy)
4300 return false;
4301
4302 unsigned const OldNumElements = OldLoadTy->getNumElements();
4303
4304 // Search all uses of load. If all uses are shufflevector instructions, and
4305 // the second operands are all poison values, find the minimum and maximum
4306 // indices of the vector elements referenced by all shuffle masks.
4307 // Otherwise return `std::nullopt`.
4308 using IndexRange = std::pair<int, int>;
4309 auto GetIndexRangeInShuffles = [&]() -> std::optional<IndexRange> {
4310 IndexRange OutputRange = IndexRange(OldNumElements, -1);
4311 for (llvm::Use &Use : I.uses()) {
4312 // Ensure all uses match the required pattern.
4313 User *Shuffle = Use.getUser();
4314 ArrayRef<int> Mask;
4315
4316 if (!match(Shuffle,
4317 m_Shuffle(m_Specific(OldLoad), m_Undef(), m_Mask(Mask))))
4318 return std::nullopt;
4319
4320 // Ignore shufflevector instructions that have no uses.
4321 if (Shuffle->use_empty())
4322 continue;
4323
4324 // Find the min and max indices used by the shufflevector instruction.
4325 for (int Index : Mask) {
4326 if (Index >= 0 && Index < static_cast<int>(OldNumElements)) {
4327 OutputRange.first = std::min(Index, OutputRange.first);
4328 OutputRange.second = std::max(Index, OutputRange.second);
4329 }
4330 }
4331 }
4332
4333 if (OutputRange.second < OutputRange.first)
4334 return std::nullopt;
4335
4336 return OutputRange;
4337 };
4338
4339 // Get the range of vector elements used by shufflevector instructions.
4340 if (std::optional<IndexRange> Indices = GetIndexRangeInShuffles()) {
4341 unsigned const NewNumElements = Indices->second + 1u;
4342
4343 // If the range of vector elements is smaller than the full load, attempt
4344 // to create a smaller load.
4345 if (NewNumElements < OldNumElements) {
4346 IRBuilder Builder(&I);
4347 Builder.SetCurrentDebugLocation(I.getDebugLoc());
4348
4349 // Calculate costs of old and new ops.
4350 Type *ElemTy = OldLoadTy->getElementType();
4351 FixedVectorType *NewLoadTy = FixedVectorType::get(ElemTy, NewNumElements);
4352 Value *PtrOp = OldLoad->getPointerOperand();
4353
4354 InstructionCost OldCost = TTI.getMemoryOpCost(
4355 Instruction::Load, OldLoad->getType(), OldLoad->getAlign(),
4356 OldLoad->getPointerAddressSpace(), CostKind);
4357 InstructionCost NewCost =
4358 TTI.getMemoryOpCost(Instruction::Load, NewLoadTy, OldLoad->getAlign(),
4359 OldLoad->getPointerAddressSpace(), CostKind);
4360
4361 using UseEntry = std::pair<ShuffleVectorInst *, std::vector<int>>;
4362 SmallVector<UseEntry> NewUses;
4363 unsigned const MaxIndex = NewNumElements * 2u;
4364
4365 for (llvm::Use &Use : I.uses()) {
4366 auto *Shuffle = cast<ShuffleVectorInst>(Use.getUser());
4367 ArrayRef<int> OldMask = Shuffle->getShuffleMask();
4368
4369 // Create entry for new use.
4370 NewUses.push_back({Shuffle, OldMask});
4371
4372 // Validate mask indices.
4373 for (int Index : OldMask) {
4374 if (Index >= static_cast<int>(MaxIndex))
4375 return false;
4376 }
4377
4378 // Update costs.
4379 OldCost +=
4380 TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, Shuffle->getType(),
4381 OldLoadTy, OldMask, CostKind);
4382 NewCost +=
4383 TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, Shuffle->getType(),
4384 NewLoadTy, OldMask, CostKind);
4385 }
4386
4387 LLVM_DEBUG(
4388 dbgs() << "Found a load used only by shufflevector instructions: "
4389 << I << "\n OldCost: " << OldCost
4390 << " vs NewCost: " << NewCost << "\n");
4391
4392 if (OldCost < NewCost || !NewCost.isValid())
4393 return false;
4394
4395 // Create new load of smaller vector.
4396 auto *NewLoad = cast<LoadInst>(
4397 Builder.CreateAlignedLoad(NewLoadTy, PtrOp, OldLoad->getAlign()));
4398 NewLoad->copyMetadata(I);
4399
4400 // Replace all uses.
4401 for (UseEntry &Use : NewUses) {
4402 ShuffleVectorInst *Shuffle = Use.first;
4403 std::vector<int> &NewMask = Use.second;
4404
4405 Builder.SetInsertPoint(Shuffle);
4406 Builder.SetCurrentDebugLocation(Shuffle->getDebugLoc());
4407 Value *NewShuffle = Builder.CreateShuffleVector(
4408 NewLoad, PoisonValue::get(NewLoadTy), NewMask);
4409
4410 replaceValue(*Shuffle, *NewShuffle, false);
4411 }
4412
4413 return true;
4414 }
4415 }
4416 return false;
4417}
4418
4419// Attempt to narrow a phi of shufflevector instructions where the two incoming
4420// values have the same operands but different masks. If the two shuffle masks
4421// are offsets of one another we can use one branch to rotate the incoming
4422// vector and perform one larger shuffle after the phi.
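// A hypothetical instance (masks invented for this sketch): a phi of two
// <2 x i32> -> <4 x i32> shuffles of %v with masks <1, poison, poison, poison>
// and <0, poison, poison, poison> differs by a constant offset of 1, so the
// first incoming value can be rotated with mask <1, 0> before the phi, the phi
// narrowed to <2 x i32>, and a single widening shuffle with the second mask
// emitted after it.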
4423bool VectorCombine::shrinkPhiOfShuffles(Instruction &I) {
4424 auto *Phi = dyn_cast<PHINode>(&I);
4425 if (!Phi || Phi->getNumIncomingValues() != 2u)
4426 return false;
4427
4428 Value *Op = nullptr;
4429 ArrayRef<int> Mask0;
4430 ArrayRef<int> Mask1;
4431
4432 if (!match(Phi->getOperand(0u),
4433 m_OneUse(m_Shuffle(m_Value(Op), m_Poison(), m_Mask(Mask0)))) ||
4434 !match(Phi->getOperand(1u),
4435 m_OneUse(m_Shuffle(m_Specific(Op), m_Poison(), m_Mask(Mask1)))))
4436 return false;
4437
4438 auto *Shuf = cast<ShuffleVectorInst>(Phi->getOperand(0u));
4439
4440 // Ensure result vectors are wider than the argument vector.
4441 auto *InputVT = cast<FixedVectorType>(Op->getType());
4442 auto *ResultVT = cast<FixedVectorType>(Shuf->getType());
4443 auto const InputNumElements = InputVT->getNumElements();
4444
4445 if (InputNumElements >= ResultVT->getNumElements())
4446 return false;
4447
4448 // Take the difference of the two shuffle masks at each index. Ignore poison
4449 // values at the same index in both masks.
4450 SmallVector<int, 16> NewMask;
4451 NewMask.reserve(Mask0.size());
4452
4453 for (auto [M0, M1] : zip(Mask0, Mask1)) {
4454 if (M0 >= 0 && M1 >= 0)
4455 NewMask.push_back(M0 - M1);
4456 else if (M0 == -1 && M1 == -1)
4457 continue;
4458 else
4459 return false;
4460 }
4461
4462 // Ensure all elements of the new mask are equal. If the difference between
4463 // the incoming mask elements is the same, the two must be constant offsets
4464 // of one another.
4465 if (NewMask.empty() || !all_equal(NewMask))
4466 return false;
4467
4468 // Create new mask using difference of the two incoming masks.
4469 int MaskOffset = NewMask[0u];
4470 unsigned Index = (InputNumElements + MaskOffset) % InputNumElements;
4471 NewMask.clear();
4472
4473 for (unsigned I = 0u; I < InputNumElements; ++I) {
4474 NewMask.push_back(Index);
4475 Index = (Index + 1u) % InputNumElements;
4476 }
4477
4478 // Calculate costs for worst cases and compare.
4479 auto const Kind = TTI::SK_PermuteSingleSrc;
4480 auto OldCost =
4481 std::max(TTI.getShuffleCost(Kind, ResultVT, InputVT, Mask0, CostKind),
4482 TTI.getShuffleCost(Kind, ResultVT, InputVT, Mask1, CostKind));
4483 auto NewCost = TTI.getShuffleCost(Kind, InputVT, InputVT, NewMask, CostKind) +
4484 TTI.getShuffleCost(Kind, ResultVT, InputVT, Mask1, CostKind);
4485
4486 LLVM_DEBUG(dbgs() << "Found a phi of mergeable shuffles: " << I
4487 << "\n OldCost: " << OldCost << " vs NewCost: " << NewCost
4488 << "\n");
4489
4490 if (NewCost > OldCost)
4491 return false;
4492
4493 // Create new shuffles and narrowed phi.
4494 auto Builder = IRBuilder(Shuf);
4495 Builder.SetCurrentDebugLocation(Shuf->getDebugLoc());
4496 auto *PoisonVal = PoisonValue::get(InputVT);
4497 auto *NewShuf0 = Builder.CreateShuffleVector(Op, PoisonVal, NewMask);
4498 Worklist.push(cast<Instruction>(NewShuf0));
4499
4500 Builder.SetInsertPoint(Phi);
4501 Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
4502 auto *NewPhi = Builder.CreatePHI(NewShuf0->getType(), 2u);
4503 NewPhi->addIncoming(NewShuf0, Phi->getIncomingBlock(0u));
4504 NewPhi->addIncoming(Op, Phi->getIncomingBlock(1u));
4505
4506 Builder.SetInsertPoint(*NewPhi->getInsertionPointAfterDef());
4507 PoisonVal = PoisonValue::get(NewPhi->getType());
4508 auto *NewShuf1 = Builder.CreateShuffleVector(NewPhi, PoisonVal, Mask1);
4509
4510 replaceValue(*Phi, *NewShuf1);
4511 return true;
4512}
4513
4514/// This is the entry point for all transforms. Pass manager differences are
4515/// handled in the callers of this function.
4516bool VectorCombine::run() {
4517 if (DisableVectorCombine)
4518 return false;
4519
4520 // Don't attempt vectorization if the target does not support vectors.
4521 if (!TTI.getNumberOfRegisters(TTI.getRegisterClassForType(/*Vector*/ true)))
4522 return false;
4523
4524 LLVM_DEBUG(dbgs() << "\n\nVECTORCOMBINE on " << F.getName() << "\n");
4525
4526 auto FoldInst = [this](Instruction &I) {
4527 Builder.SetInsertPoint(&I);
4528 bool IsVectorType = isa<VectorType>(I.getType());
4529 bool IsFixedVectorType = isa<FixedVectorType>(I.getType());
4530 auto Opcode = I.getOpcode();
4531
4532 LLVM_DEBUG(dbgs() << "VC: Visiting: " << I << '\n');
4533
4534 // These folds should be beneficial regardless of when this pass is run
4535 // in the optimization pipeline.
4536 // The type checking is for run-time efficiency. We can avoid wasting time
4537 // dispatching to folding functions if there's no chance of matching.
4538 if (IsFixedVectorType) {
4539 switch (Opcode) {
4540 case Instruction::InsertElement:
4541 if (vectorizeLoadInsert(I))
4542 return true;
4543 break;
4544 case Instruction::ShuffleVector:
4545 if (widenSubvectorLoad(I))
4546 return true;
4547 break;
4548 default:
4549 break;
4550 }
4551 }
4552
4553 // This transform works with scalable and fixed vectors
4554 // TODO: Identify and allow other scalable transforms
4555 if (IsVectorType) {
4556 if (scalarizeOpOrCmp(I))
4557 return true;
4558 if (scalarizeLoadExtract(I))
4559 return true;
4560 if (scalarizeExtExtract(I))
4561 return true;
4562 if (scalarizeVPIntrinsic(I))
4563 return true;
4564 if (foldInterleaveIntrinsics(I))
4565 return true;
4566 }
4567
4568 if (Opcode == Instruction::Store)
4569 if (foldSingleElementStore(I))
4570 return true;
4571
4572 // If this is an early pipeline invocation of this pass, we are done.
4573 if (TryEarlyFoldsOnly)
4574 return false;
4575
4576 // Otherwise, try folds that improve codegen but may interfere with
4577 // early IR canonicalizations.
4578 // The type checking is for run-time efficiency. We can avoid wasting time
4579 // dispatching to folding functions if there's no chance of matching.
4580 if (IsFixedVectorType) {
4581 switch (Opcode) {
4582 case Instruction::InsertElement:
4583 if (foldInsExtFNeg(I))
4584 return true;
4585 if (foldInsExtBinop(I))
4586 return true;
4587 if (foldInsExtVectorToShuffle(I))
4588 return true;
4589 break;
4590 case Instruction::ShuffleVector:
4591 if (foldPermuteOfBinops(I))
4592 return true;
4593 if (foldShuffleOfBinops(I))
4594 return true;
4595 if (foldShuffleOfSelects(I))
4596 return true;
4597 if (foldShuffleOfCastops(I))
4598 return true;
4599 if (foldShuffleOfShuffles(I))
4600 return true;
4601 if (foldShuffleOfIntrinsics(I))
4602 return true;
4603 if (foldSelectShuffle(I))
4604 return true;
4605 if (foldShuffleToIdentity(I))
4606 return true;
4607 break;
4608 case Instruction::Load:
4609 if (shrinkLoadForShuffles(I))
4610 return true;
4611 break;
4612 case Instruction::BitCast:
4613 if (foldBitcastShuffle(I))
4614 return true;
4615 break;
4616 case Instruction::And:
4617 case Instruction::Or:
4618 case Instruction::Xor:
4619 if (foldBitOpOfCastops(I))
4620 return true;
4621 if (foldBitOpOfCastConstant(I))
4622 return true;
4623 break;
4624 case Instruction::PHI:
4625 if (shrinkPhiOfShuffles(I))
4626 return true;
4627 break;
4628 default:
4629 if (shrinkType(I))
4630 return true;
4631 break;
4632 }
4633 } else {
4634 switch (Opcode) {
4635 case Instruction::Call:
4636 if (foldShuffleFromReductions(I))
4637 return true;
4638 if (foldCastFromReductions(I))
4639 return true;
4640 break;
4641 case Instruction::ExtractElement:
4642 if (foldShuffleChainsToReduce(I))
4643 return true;
4644 break;
4645 case Instruction::ICmp:
4646 case Instruction::FCmp:
4647 if (foldExtractExtract(I))
4648 return true;
4649 break;
4650 case Instruction::Or:
4651 if (foldConcatOfBoolMasks(I))
4652 return true;
4653 [[fallthrough]];
4654 default:
4655 if (Instruction::isBinaryOp(Opcode)) {
4656 if (foldExtractExtract(I))
4657 return true;
4658 if (foldExtractedCmps(I))
4659 return true;
4660 if (foldBinopOfReductions(I))
4661 return true;
4662 }
4663 break;
4664 }
4665 }
4666 return false;
4667 };
4668
4669 bool MadeChange = false;
4670 for (BasicBlock &BB : F) {
4671 // Ignore unreachable basic blocks.
4672 if (!DT.isReachableFromEntry(&BB))
4673 continue;
4674 // We need an early-increment style walk so that we can erase instructions
4675 // in the loop, but make_early_inc_range is not applicable here, as the next
4676 // iterator may be invalidated by RecursivelyDeleteTriviallyDeadInstructions.
4677 // We manually maintain the next instruction and update it when it is about
4678 // to be deleted.
4679 Instruction *I = &BB.front();
4680 while (I) {
4681 NextInst = I->getNextNode();
4682 if (!I->isDebugOrPseudoInst())
4683 MadeChange |= FoldInst(*I);
4684 I = NextInst;
4685 }
4686 }
4687
4688 NextInst = nullptr;
4689
4690 while (!Worklist.isEmpty()) {
4691 Instruction *I = Worklist.removeOne();
4692 if (!I)
4693 continue;
4694
4695 if (isInstructionTriviallyDead(I)) {
4696 eraseInstruction(*I);
4697 continue;
4698 }
4699
4700 MadeChange |= FoldInst(*I);
4701 }
4702
4703 return MadeChange;
4704}
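The traversal in run() above deliberately avoids make_early_inc_range: a fold may call RecursivelyDeleteTriviallyDeadInstructions and delete the very instruction that was cached as the next one to visit, so the pass keeps NextInst as a member and patches it up before an instruction is erased. A minimal standalone sketch of that bookkeeping, using only std::list and no LLVM types (the names below are illustrative and not taken from this file):

#include <iostream>
#include <iterator>
#include <list>

// Deletion-safe traversal: the visit callback may erase elements, including
// the one cached as "next"; erase() advances the cursor first when needed.
struct Traversal {
  std::list<int> Items;
  std::list<int>::iterator Next;

  void erase(std::list<int>::iterator It) {
    if (It == Next)
      ++Next; // Keep the cached cursor valid before erasing.
    Items.erase(It);
  }

  template <typename Fn> void visitAll(Fn Visit) {
    for (auto It = Items.begin(); It != Items.end();) {
      Next = std::next(It); // Cache the successor before visiting.
      Visit(*this, It);     // May erase It's successor (or other elements).
      It = Next;            // Always valid: erase() kept it up to date.
    }
  }
};

int main() {
  Traversal T;
  T.Items = {1, 2, 3, 4, 5};
  // Each odd element erases its successor, if any.
  T.visitAll([](Traversal &Tr, std::list<int>::iterator It) {
    if (*It % 2 != 0 && std::next(It) != Tr.Items.end())
      Tr.erase(std::next(It));
  });
  for (int V : T.Items)
    std::cout << V << ' '; // Prints: 1 3 5
  std::cout << '\n';
}

The key invariant is that erase() never removes the cached cursor without first advancing it, so the outer loop's It = Next always points at a live element (or the end).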
4705
4706PreservedAnalyses VectorCombinePass::run(Function &F,
4707 FunctionAnalysisManager &FAM) {
4708 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
4709 TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
4710 DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
4711 AAResults &AA = FAM.getResult<AAManager>(F);
4712 const DataLayout *DL = &F.getDataLayout();
4713 VectorCombine Combiner(F, TTI, DT, AA, AC, DL, TTI::TCK_RecipThroughput,
4714 TryEarlyFoldsOnly);
4715 if (!Combiner.run())
4716 return PreservedAnalyses::all();
4717 PreservedAnalyses PA;
4718 PA.preserveSet<CFGAnalyses>();
4719 return PA;
4720}
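For context, VectorCombinePass is declared in llvm/Transforms/Vectorize/VectorCombine.h and is normally added to the standard pipelines twice: early with TryEarlyFoldsOnly set to true, and again later with the full cost-model-driven folds enabled. A minimal sketch of scheduling it by hand with the new pass manager, assuming a FunctionAnalysisManager whose default analyses have already been registered (for example via PassBuilder::registerFunctionAnalyses):

#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/Vectorize/VectorCombine.h"

using namespace llvm;

// Run VectorCombine twice on a function: first the conservative early-folds
// mode, then the full transform set.
static PreservedAnalyses runVectorCombine(Function &F,
                                          FunctionAnalysisManager &FAM) {
  FunctionPassManager FPM;
  FPM.addPass(VectorCombinePass(/*TryEarlyFoldsOnly=*/true));
  FPM.addPass(VectorCombinePass());
  return FPM.run(F, FAM);
}

From the command line, the pass can be exercised in isolation with something like: opt -passes=vector-combine -S input.ll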