//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
                                  cl::desc("Run the SLP vectorization passes"));

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number"));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned>
MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
            cl::desc("Maximum SLP vectorization factor (0=unlimited)"));

static cl::opt<int>
MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden,
               cl::desc("Maximum depth of the lookup for consecutive stores."));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
    cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

// The maximum depth that the look-ahead score heuristic will explore.
// The higher this value, the higher the compilation time overhead.
static cl::opt<int> LookAheadMaxDepth(
    "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for operand reordering scores"));

// The maximum depth that the look-ahead score heuristic will explore
// when it is probing among candidates for vectorization tree roots.
// The higher this value, the higher the compilation time overhead, but unlike
// the similar limit for operand ordering this one is used less frequently, so
// the impact of a higher value is less noticeable.
static cl::opt<int> RootLookAheadMaxDepth(
    "slp-max-root-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for searching best rooting option"));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important things to filter here are types which are invalid in
/// LLVM vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

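// Illustrative sketch (editor's addition, not part of the upstream file):
// i32 is a valid element type, while x86_fp80 is filtered out. `Ctx` is a
// hypothetical LLVMContext supplied by the caller.
LLVM_ATTRIBUTE_UNUSED static void isValidElementTypeExample(LLVMContext &Ctx) {
  assert(isValidElementType(Type::getInt32Ty(Ctx)) && "i32 can be vectorized");
  assert(!isValidElementType(Type::getX86_FP80Ty(Ctx)) &&
         "x86_fp80 has no meaningful vectorization path");
}
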
/// \returns True if the value is a constant (but not globals/constant
/// expressions).
static bool isConstant(Value *V) {
  return isa<Constant>(V) && !isa<ConstantExpr, GlobalValue>(V);
}

/// Checks if \p V is one of the vector-like instructions, i.e. undef,
/// insertelement/extractelement with constant indices for a fixed vector type,
/// or an extractvalue instruction.
static bool isVectorLikeInstWithConstOps(Value *V) {
  if (!isa<InsertElementInst, ExtractElementInst>(V) &&
      !isa<ExtractValueInst, UndefValue>(V))
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I || isa<ExtractValueInst>(I))
    return true;
  if (!isa<FixedVectorType>(I->getOperand(0)->getType()))
    return false;
  if (isa<ExtractElementInst>(I))
    return isConstant(I->getOperand(1));
  assert(isa<InsertElementInst>(V) && "Expected only insertelement.");
  return isConstant(I->getOperand(2));
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  if (all_of(VL, isVectorLikeInstWithConstOps))
    return true;

  BasicBlock *BB = I0->getParent();
  for (int I = 1, E = VL.size(); I < E; I++) {
    auto *II = dyn_cast<Instruction>(VL[I]);
    if (!II)
      return false;

    if (BB != II->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).
static bool allConstant(ArrayRef<Value *> VL) {
  // Constant expressions and globals can't be vectorized like normal
  // integer/FP constants.
  return all_of(VL, isConstant);
}

/// \returns True if all of the values in \p VL are identical or some of them
/// are UndefValue.
static bool isSplat(ArrayRef<Value *> VL) {
  Value *FirstNonUndef = nullptr;
  for (Value *V : VL) {
    if (isa<UndefValue>(V))
      continue;
    if (!FirstNonUndef) {
      FirstNonUndef = V;
      continue;
    }
    if (V != FirstNonUndef)
      return false;
  }
  return FirstNonUndef != nullptr;
}

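// Illustrative sketch (editor's addition, not part of the upstream file):
// undef lanes do not break a splat, but a list with no defined value at all
// is not treated as one. `Ctx` is a hypothetical LLVMContext.
LLVM_ATTRIBUTE_UNUSED static void isSplatExample(LLVMContext &Ctx) {
  Value *C = ConstantInt::get(Type::getInt32Ty(Ctx), 7);
  Value *U = UndefValue::get(Type::getInt32Ty(Ctx));
  assert(isSplat({C, U, C}) && "identical values modulo undef");
  assert(!isSplat({U, U}) && "an all-undef list has no splat value");
}
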
/// \returns True if \p I is commutative; handles CmpInst and BinaryOperator.
static bool isCommutative(Instruction *I) {
  if (auto *Cmp = dyn_cast<CmpInst>(I))
    return Cmp->isCommutative();
  if (auto *BO = dyn_cast<BinaryOperator>(I))
    return BO->isCommutative();
  // TODO: This should check for generic Instruction::isCommutative(), but
  // we need to confirm that the caller code correctly handles Intrinsics
  // (for example, those that do not have 2 operands).
  return false;
}

/// \returns the inserting index of an InsertElement or InsertValue
/// instruction, using \p Offset as the base offset for the index.
static Optional<unsigned> getInsertIndex(const Value *InsertInst,
                                         unsigned Offset = 0) {
  int Index = Offset;
  if (const auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
    if (const auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
      auto *VT = cast<FixedVectorType>(IE->getType());
      if (CI->getValue().uge(VT->getNumElements()))
        return None;
      Index *= VT->getNumElements();
      Index += CI->getZExtValue();
      return Index;
    }
    return None;
  }

  const auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  for (unsigned I : IV->indices()) {
    if (const auto *ST = dyn_cast<StructType>(CurrentType)) {
      Index *= ST->getNumElements();
      CurrentType = ST->getElementType(I);
    } else if (const auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      Index *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else {
      return None;
    }
    Index += I;
  }
  return Index;
}

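// Worked example (editor's addition, not part of the upstream file): for
//   %r = insertvalue {[2 x i32], [2 x i32]} %agg, i32 %v, 1, 0
// the loop flattens the aggregate indices in row-major order:
// Index = (0 * 2 + 1) * 2 + 0 = 2, i.e. the third scalar slot. An
// insertelement with an out-of-range constant index yields None instead.
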
/// Checks if the given value is actually an undefined constant vector.
/// Also, if the \p ShuffleMask is not empty, tries to check if the non-masked
/// elements actually mask the insertelement buildvector, if any.
static bool isUndefVector(const Value *V, ArrayRef<int> ShuffleMask = None) {
  if (isa<UndefValue>(V))
    return true;
  auto *VecTy = dyn_cast<FixedVectorType>(V->getType());
  if (!VecTy)
    return false;
  auto *C = dyn_cast<Constant>(V);
  if (!C) {
    if (!ShuffleMask.empty()) {
      const Value *Base = V;
      while (auto *II = dyn_cast<InsertElementInst>(Base)) {
        Base = II->getOperand(0);
        Optional<unsigned> Idx = getInsertIndex(II);
        if (!Idx)
          continue;
        if (*Idx < ShuffleMask.size() && ShuffleMask[*Idx] == UndefMaskElem)
          return false;
      }
      return V != Base && isUndefVector(Base);
    }
    return false;
  }
  for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) {
    if (Constant *Elem = C->getAggregateElement(I))
      if (!isa<UndefValue>(Elem) &&
          (ShuffleMask.empty() ||
           (I < ShuffleMask.size() && ShuffleMask[I] == UndefMaskElem)))
        return false;
  }
  return true;
}

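// Illustrative sketch (editor's addition, not part of the upstream file):
// a plain undef vector qualifies, while a constant vector with any defined
// element (and no shuffle mask to hide it) does not. `Ctx` is hypothetical.
LLVM_ATTRIBUTE_UNUSED static void isUndefVectorExample(LLVMContext &Ctx) {
  auto *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 2);
  assert(isUndefVector(UndefValue::get(VecTy)) && "undef vector");
  Constant *Mixed =
      ConstantVector::get({ConstantInt::get(Type::getInt32Ty(Ctx), 0),
                           UndefValue::get(Type::getInt32Ty(Ctx))});
  assert(!isUndefVector(Mixed) && "a defined element makes it non-undef");
}
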
/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul.
/// Mask will return the Shuffle Mask equivalent to the extracted elements.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// ShuffleVectorInst/getShuffleCost?
static Optional<TargetTransformInfo::ShuffleKind>
isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
  const auto *It =
      find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); });
  if (It == VL.end())
    return None;
  auto *EI0 = cast<ExtractElementInst>(*It);
  if (isa<ScalableVectorType>(EI0->getVectorOperandType()))
    return None;
  unsigned Size =
      cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  Mask.assign(VL.size(), UndefMaskElem);
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    // Undef can be represented as an undef element in a vector.
    if (isa<UndefValue>(VL[I]))
      continue;
    auto *EI = cast<ExtractElementInst>(VL[I]);
    if (isa<ScalableVectorType>(EI->getVectorOperandType()))
      return None;
    auto *Vec = EI->getVectorOperand();
    // We can extractelement from undef or poison vector.
    if (isUndefVector(Vec))
      continue;
    // All vector operands must have the same number of vector elements.
    if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
      return None;
    if (isa<UndefValue>(EI->getIndexOperand()))
      continue;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size))
      continue;
    unsigned IntIdx = Idx->getValue().getZExtValue();
    Mask[I] = IntIdx;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec) {
      Vec1 = Vec;
    } else if (!Vec2 || Vec2 == Vec) {
      Vec2 = Vec;
      Mask[I] += Size;
    } else {
      return None;
    }
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have a permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return AltOp != MainOp; }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// Example of an unsupported opcode is SDIV, which can potentially cause UB if
/// the "shuffled out" lane would result in division by zero.
static bool isValidForAlternation(unsigned Opcode) {
  if (Instruction::isIntDivRem(Opcode))
    return false;

  return true;
}

static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0);

/// Checks if the provided operands of 2 cmp instructions are compatible, i.e.
/// compatible instructions or constants, or just some other regular values.
static bool areCompatibleCmpOps(Value *BaseOp0, Value *BaseOp1, Value *Op0,
                                Value *Op1) {
  return (isConstant(BaseOp0) && isConstant(Op0)) ||
         (isConstant(BaseOp1) && isConstant(Op1)) ||
         (!isa<Instruction>(BaseOp0) && !isa<Instruction>(Op0) &&
          !isa<Instruction>(BaseOp1) && !isa<Instruction>(Op1)) ||
         getSameOpcode({BaseOp0, Op0}).getOpcode() ||
         getSameOpcode({BaseOp1, Op1}).getOpcode();
}

/// \returns true if a compare instruction \p CI has a similar "look" and the
/// same predicate as \p BaseCI, "as is" or with its operands and predicate
/// swapped, false otherwise.
static bool isCmpSameOrSwapped(const CmpInst *BaseCI, const CmpInst *CI) {
  assert(BaseCI->getOperand(0)->getType() == CI->getOperand(0)->getType() &&
         "Assessing comparisons of different types?");
  CmpInst::Predicate BasePred = BaseCI->getPredicate();
  CmpInst::Predicate Pred = CI->getPredicate();
  CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(Pred);

  Value *BaseOp0 = BaseCI->getOperand(0);
  Value *BaseOp1 = BaseCI->getOperand(1);
  Value *Op0 = CI->getOperand(0);
  Value *Op1 = CI->getOperand(1);

  return (BasePred == Pred &&
          areCompatibleCmpOps(BaseOp0, BaseOp1, Op0, Op1)) ||
         (BasePred == SwappedPred &&
          areCompatibleCmpOps(BaseOp0, BaseOp1, Op1, Op0));
}

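// Example (editor's addition, not part of the upstream file):
//   %a = icmp slt i32 %x, %y
//   %b = icmp sgt i32 %y, %x
// are considered the same by isCmpSameOrSwapped: sgt is the swapped form of
// slt, and the operands of %b are the operands of %a swapped accordingly.
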
/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState, i.e. the opcode with which we suppose the whole list
/// could be vectorized even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  bool IsCmpOp = isa<CmpInst>(VL[BaseIndex]);
  CmpInst::Predicate BasePred =
      IsCmpOp ? cast<CmpInst>(VL[BaseIndex])->getPredicate()
              : CmpInst::BAD_ICMP_PREDICATE;
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
          isValidForAlternation(Opcode)) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (auto *Inst = dyn_cast<CmpInst>(VL[Cnt]); Inst && IsCmpOp) {
      auto *BaseInst = cast<CmpInst>(VL[BaseIndex]);
      Type *Ty0 = BaseInst->getOperand(0)->getType();
      Type *Ty1 = Inst->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        assert(InstOpcode == Opcode && "Expected same CmpInst opcode.");
        // Check for compatible operands. If the corresponding operands are not
        // compatible - need to perform alternate vectorization.
        CmpInst::Predicate CurrentPred = Inst->getPredicate();
        CmpInst::Predicate SwappedCurrentPred =
            CmpInst::getSwappedPredicate(CurrentPred);

        if (E == 2 &&
            (BasePred == CurrentPred || BasePred == SwappedCurrentPred))
          continue;

        if (isCmpSameOrSwapped(BaseInst, Inst))
          continue;
        auto *AltInst = cast<CmpInst>(VL[AltIndex]);
        if (AltIndex != BaseIndex) {
          if (isCmpSameOrSwapped(AltInst, Inst))
            continue;
        } else if (BasePred != CurrentPred) {
          assert(
              isValidForAlternation(InstOpcode) &&
              "CmpInst isn't safe for alternation, logic needs to be updated!");
          AltIndex = Cnt;
          continue;
        }
        CmpInst::Predicate AltPred = AltInst->getPredicate();
        if (BasePred == CurrentPred || BasePred == SwappedCurrentPred ||
            AltPred == CurrentPred || AltPred == SwappedCurrentPred)
          continue;
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}

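// Example (editor's addition, not part of the upstream file): for
// VL = {add %a0, %b0; sub %a1, %b1; add %a2, %b2; sub %a3, %b3} the result
// has MainOp = add, AltOp = sub and isAltShuffle() == true; such a list can
// later be emitted as a vector add, a vector sub and a blending shuffle.
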
/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}

/// \returns True if an in-tree use also needs extraction. This refers to a
/// possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
      if (isVectorIntrinsicWithScalarOpAtArg(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    [[fallthrough]];
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Shuffles \p Mask in accordance with the given \p SubMask.
static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask) {
  if (SubMask.empty())
    return;
  if (Mask.empty()) {
    Mask.append(SubMask.begin(), SubMask.end());
    return;
  }
  SmallVector<int> NewMask(SubMask.size(), UndefMaskElem);
  int TermValue = std::min(Mask.size(), SubMask.size());
  for (int I = 0, E = SubMask.size(); I < E; ++I) {
    if (SubMask[I] >= TermValue || SubMask[I] == UndefMaskElem ||
        Mask[SubMask[I]] >= TermValue)
      continue;
    NewMask[I] = Mask[SubMask[I]];
  }
  Mask.swap(NewMask);
}

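// Illustrative sketch (editor's addition, not part of the upstream file):
// composing a reverse mask with a sub-mask that keeps the first two lanes.
LLVM_ATTRIBUTE_UNUSED static void addMaskExample() {
  SmallVector<int> Mask = {3, 2, 1, 0};
  addMask(Mask, {0, 1, UndefMaskElem, UndefMaskElem});
  // For defined entries NewMask[I] = Mask[SubMask[I]], so Mask is now
  // {3, 2, UndefMaskElem, UndefMaskElem}.
  assert(Mask[0] == 3 && Mask[1] == 2 && "sub-mask applied on top of mask");
}
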
/// Order may have elements assigned a special value (size) which is out of
/// bounds. Such indices only appear in places which correspond to undef values
/// (see canReuseExtract for details) and are used to keep undef values from
/// affecting operand ordering.
/// The first loop below simply finds all unused indices and then the next loop
/// nest assigns these indices to the positions of undef values.
/// As an example, below Order has two undef positions and they are assigned
/// the values 3 and 7 respectively:
/// before: 6 9 5 4 9 2 1 0
/// after:  6 3 5 4 7 2 1 0
static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) {
  const unsigned Sz = Order.size();
  SmallBitVector UnusedIndices(Sz, /*t=*/true);
  SmallBitVector MaskedIndices(Sz);
  for (unsigned I = 0; I < Sz; ++I) {
    if (Order[I] < Sz)
      UnusedIndices.reset(Order[I]);
    else
      MaskedIndices.set(I);
  }
  if (MaskedIndices.none())
    return;
  assert(UnusedIndices.count() == MaskedIndices.count() &&
         "Non-synced masked/available indices.");
  int Idx = UnusedIndices.find_first();
  int MIdx = MaskedIndices.find_first();
  while (MIdx >= 0) {
    assert(Idx >= 0 && "Indices must be synced.");
    Order[MIdx] = Idx;
    Idx = UnusedIndices.find_next(Idx);
    MIdx = MaskedIndices.find_next(MIdx);
  }
}

namespace llvm {

static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<int> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E, UndefMaskElem);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}

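// Illustrative sketch (editor's addition, not part of the upstream file):
// the inverse of the order {2, 0, 1} expressed as a shuffle mask.
LLVM_ATTRIBUTE_UNUSED static void inversePermutationExample() {
  SmallVector<unsigned, 4> Order = {2, 0, 1};
  SmallVector<int> Mask;
  inversePermutation(Order, Mask);
  // Mask[Order[I]] = I, so Mask == {1, 2, 0}.
  assert(Mask[0] == 1 && Mask[1] == 2 && Mask[2] == 0 && "inverted order");
}
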
/// Reorders the list of scalars in accordance with the given \p Mask.
static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
                           ArrayRef<int> Mask) {
  assert(!Mask.empty() && "Expected non-empty mask.");
  SmallVector<Value *> Prev(Scalars.size(),
                            UndefValue::get(Scalars.front()->getType()));
  Prev.swap(Scalars);
  for (unsigned I = 0, E = Prev.size(); I < E; ++I)
    if (Mask[I] != UndefMaskElem)
      Scalars[Mask[I]] = Prev[I];
}

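// Illustrative sketch (editor's addition, not part of the upstream file):
// reorderScalars scatters the old element at position I into position
// Mask[I]. The values A, B, C are hypothetical scalars of the same type.
LLVM_ATTRIBUTE_UNUSED static void reorderScalarsExample(Value *A, Value *B,
                                                        Value *C) {
  SmallVector<Value *> Scalars = {A, B, C};
  reorderScalars(Scalars, {2, 0, 1});
  // A lands in slot 2, B in slot 0, C in slot 1: Scalars == {B, C, A}.
  assert(Scalars[0] == B && Scalars[2] == A && "mask scatters old positions");
}
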
/// Checks if the provided value does not require scheduling. It does not
/// require scheduling if this is not an instruction or it is an instruction
/// that does not read/write memory and all of its operands are either not
/// instructions, or are phi nodes, or are instructions from a different block.
static bool areAllOperandsNonInsts(Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return true;
  return !mayHaveNonDefUseDependency(*I) &&
         all_of(I->operands(), [I](Value *V) {
           auto *IO = dyn_cast<Instruction>(V);
           if (!IO)
             return true;
           return isa<PHINode>(IO) || IO->getParent() != I->getParent();
         });
}

/// Checks if the provided value does not require scheduling. It does not
/// require scheduling if this is not an instruction or it is an instruction
/// that does not read/write memory and all of its users are phi nodes or
/// instructions from a different block.
static bool isUsedOutsideBlock(Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return true;
  // Limits the number of uses to save compile time.
  constexpr int UsesLimit = 8;
  return !I->mayReadOrWriteMemory() && !I->hasNUsesOrMore(UsesLimit) &&
         all_of(I->users(), [I](User *U) {
           auto *IU = dyn_cast<Instruction>(U);
           if (!IU)
             return true;
           return IU->getParent() != I->getParent() || isa<PHINode>(IU);
         });
}

/// Checks if the specified value does not require scheduling. It does not
/// require scheduling if all operands and all users do not need to be
/// scheduled in the current basic block.
static bool doesNotNeedToBeScheduled(Value *V) {
  return areAllOperandsNonInsts(V) && isUsedOutsideBlock(V);
}

/// Checks if the specified array of instructions does not require scheduling.
/// It is so if all instructions either have operands that do not require
/// scheduling, or have users that do not require scheduling because they are
/// phis or in other basic blocks.
static bool doesNotNeedToSchedule(ArrayRef<Value *> VL) {
  return !VL.empty() &&
         (all_of(VL, isUsedOutsideBlock) || all_of(VL, areAllOperandsNonInsts));
}

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : BatchAA(*Aa), F(Func), SE(Se), TTI(Tti), TLI(TLi), LI(Li),
        DT(Dt), AC(AC), DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 const SmallDenseSet<Value *> &UserIgnoreLst);

  /// Construct a vectorizable tree that starts at \p Roots.
  void buildTree(ArrayRef<Value *> Roots);

  /// Builds external uses of the vectorized scalars, i.e. the list of
  /// vectorized scalars to be extracted, their lanes and their scalar users.
  /// \p ExternallyUsedValues contains an additional list of external uses to
  /// handle vectorization of reductions.
  void
  buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
    InstrElementSize.clear();
    UserIgnoreList = nullptr;
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// Checks if the specified gather tree entry \p TE can be represented as a
  /// shuffled vector entry + (possibly) a permutation with other gathers. It
  /// implements the checks only for possibly ordered scalars (Loads,
  /// ExtractElement, ExtractValue), which can be part of the graph.
  Optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);

  /// Sort loads into increasing pointer offsets to allow greater clustering.
  Optional<OrdersType> findPartiallyOrderedLoads(const TreeEntry &TE);

  /// Gets reordering data for the given tree entry. If the entry is vectorized
  /// - just return ReorderIndices, otherwise check if the scalars can be
  /// reordered and return the most profitable order.
  /// \param TopToBottom If true, include the order of vectorized stores and
  /// insertelement nodes, otherwise skip them.
  Optional<OrdersType> getReorderingData(const TreeEntry &TE, bool TopToBottom);

  /// Reorders the current graph to the most profitable order starting from the
  /// root node to the leaf nodes. The best order is chosen only from the nodes
  /// of the same size (vectorization factor). Smaller nodes are considered
  /// parts of a subgraph with a smaller VF and they are reordered
  /// independently. We can do this because we still need to extend smaller
  /// nodes to the wider VF and we can merge reordering shuffles with the
  /// widening shuffles.
  void reorderTopToBottom();

  /// Reorders the current graph to the most profitable order starting from
  /// leaves to the root. It allows rotating small subgraphs and reducing the
  /// number of reshuffles if the leaf nodes use the same order. In this case
  /// we can merge the orders and just shuffle the user node instead of
  /// shuffling its operands. Plus, even if the leaf nodes have different
  /// orders, it allows sinking reordering in the graph closer to the root
  /// node and merging it later during analysis.
  void reorderBottomToTop(bool IgnoreReorder = false);

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMinVF(unsigned Sz) const {
    return std::max(2U, getMinVecRegSize() / Sz);
  }

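  // Example (editor's note, not from the upstream source): with the default
  // MinVecRegSize of 128 bits, getMinVF(32) == std::max(2U, 128 / 32) == 4,
  // so trees of i32 scalars are only attempted at a VF of at least 4 lanes.
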
  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
      MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ? MaxVF : UINT_MAX;
  }

  /// Check if homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
  };
1071 
1072  /// A helper class used for scoring candidates for two consecutive lanes.
1074  const DataLayout &DL;
1075  ScalarEvolution &SE;
1076  const BoUpSLP &R;
1077  int NumLanes; // Total number of lanes (aka vectorization factor).
1078  int MaxLevel; // The maximum recursion depth for accumulating score.
1079 
  public:
    LookAheadHeuristics(const DataLayout &DL, ScalarEvolution &SE,
                        const BoUpSLP &R, int NumLanes, int MaxLevel)
        : DL(DL), SE(SE), R(R), NumLanes(NumLanes), MaxLevel(MaxLevel) {}

    // The hard-coded scores listed here are not very important, though they
    // should be higher for better matches to improve the resulting cost. When
    // computing the scores of matching one sub-tree with another, we are
    // basically counting the number of values that are matching. So even if
    // all scores are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference. Also,
    // this is important if the scalar is externally used or used in another
    // tree entry node in a different lane.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 4;
    /// The same load multiple times. This should have a better score than
    /// `ScoreSplat` because, on x86, for a 2-lane vector we can represent it
    /// with `movddup (%reg), xmm0` which has a throughput of 0.5 versus 0.5 for
    /// a vector load and 1.0 for a broadcast.
    static const int ScoreSplatLoads = 3;
    /// Loads from reversed memory addresses, e.g. load(A[i+1]), load(A[i]).
    static const int ScoreReversedLoads = 3;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 4;
    /// ExtractElementInst from same vector and reversed indices.
    static const int ScoreReversedExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g, add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// Score if all users are vectorized.
    static const int ScoreAllUserVectorized = 1;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    /// \p U1 and \p U2 are the users of \p V1 and \p V2.
    /// Also, checks if \p V1 and \p V2 are compatible with the instructions in
    /// \p MainAltOps.
    int getShallowScore(Value *V1, Value *V2, Instruction *U1, Instruction *U2,
                        ArrayRef<Value *> MainAltOps) const {
      if (V1 == V2) {
        if (isa<LoadInst>(V1)) {
          // Returns true if the users of V1 and V2 won't need to be extracted.
          auto AllUsersAreInternal = [U1, U2, this](Value *V1, Value *V2) {
            // Bail out if we have too many uses to save compilation time.
            static constexpr unsigned Limit = 8;
            if (V1->hasNUsesOrMore(Limit) || V2->hasNUsesOrMore(Limit))
              return false;

            auto AllUsersVectorized = [U1, U2, this](Value *V) {
              return llvm::all_of(V->users(), [U1, U2, this](Value *U) {
                return U == U1 || U == U2 || R.getTreeEntry(U) != nullptr;
              });
            };
            return AllUsersVectorized(V1) && AllUsersVectorized(V2);
          };
          // A broadcast of a load can be cheaper on some targets.
          if (R.TTI->isLegalBroadcastLoad(V1->getType(),
                                          ElementCount::getFixed(NumLanes)) &&
              ((int)V1->getNumUses() == NumLanes ||
               AllUsersAreInternal(V1, V2)))
            return LookAheadHeuristics::ScoreSplatLoads;
        }
        return LookAheadHeuristics::ScoreSplat;
      }

      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent())
          return LookAheadHeuristics::ScoreFail;

        Optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
        if (!Dist || *Dist == 0)
          return LookAheadHeuristics::ScoreFail;
        // The distance is too large - still may be profitable to use masked
        // loads/gathers.
        if (std::abs(*Dist) > NumLanes / 2)
          return LookAheadHeuristics::ScoreAltOpcodes;
        // This still will detect consecutive loads, but we might have "holes"
        // in some cases. It is ok for non-power-2 vectorization and may produce
        // better results. It should not affect current vectorization.
        return (*Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveLoads
                           : LookAheadHeuristics::ScoreReversedLoads;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return LookAheadHeuristics::ScoreConstants;

      // Extracts from consecutive indexes of the same vector score better, as
      // the extracts could be optimized away.
      Value *EV1;
      ConstantInt *Ex1Idx;
      if (match(V1, m_ExtractElt(m_Value(EV1), m_ConstantInt(Ex1Idx)))) {
        // Undefs are always profitable for extractelements.
        if (isa<UndefValue>(V2))
          return LookAheadHeuristics::ScoreConsecutiveExtracts;
        Value *EV2 = nullptr;
        ConstantInt *Ex2Idx = nullptr;
        if (match(V2,
                  m_ExtractElt(m_Value(EV2), m_CombineOr(m_ConstantInt(Ex2Idx),
                                                         m_Undef())))) {
          // Undefs are always profitable for extractelements.
          if (!Ex2Idx)
            return LookAheadHeuristics::ScoreConsecutiveExtracts;
          if (isUndefVector(EV2) && EV2->getType() == EV1->getType())
            return LookAheadHeuristics::ScoreConsecutiveExtracts;
          if (EV2 == EV1) {
            int Idx1 = Ex1Idx->getZExtValue();
            int Idx2 = Ex2Idx->getZExtValue();
            int Dist = Idx2 - Idx1;
            // The distance is too large - still may be profitable to use
            // shuffles.
            if (std::abs(Dist) == 0)
              return LookAheadHeuristics::ScoreSplat;
            if (std::abs(Dist) > NumLanes / 2)
              return LookAheadHeuristics::ScoreSameOpcode;
            return (Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveExtracts
                              : LookAheadHeuristics::ScoreReversedExtracts;
          }
          return LookAheadHeuristics::ScoreAltOpcodes;
        }
        return LookAheadHeuristics::ScoreFail;
      }

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1->getParent() != I2->getParent())
          return LookAheadHeuristics::ScoreFail;
        SmallVector<Value *, 4> Ops(MainAltOps.begin(), MainAltOps.end());
        Ops.push_back(I1);
        Ops.push_back(I2);
        InstructionsState S = getSameOpcode(Ops);
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() &&
            (S.MainOp->getNumOperands() <= 2 || !MainAltOps.empty() ||
             !S.isAltShuffle()) &&
            all_of(Ops, [&S](Value *V) {
              return cast<Instruction>(V)->getNumOperands() ==
                     S.MainOp->getNumOperands();
            }))
          return S.isAltShuffle() ? LookAheadHeuristics::ScoreAltOpcodes
                                  : LookAheadHeuristics::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return LookAheadHeuristics::ScoreUndef;

      return LookAheadHeuristics::ScoreFail;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until
    /// MaxLevel, and return the cumulative score. \p U1 and \p U2 are
    /// the users of \p LHS and \p RHS (that is \p LHS and \p RHS are operands
    /// of \p U1 and \p U2), except at the beginning of the recursion where
    /// these are set to nullptr.
    ///
    /// For example:
    /// \verbatim
    ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
    ///     \ /         \ /         \ /        \ /
    ///      +           +           +          +
    ///     G1          G2          G3         G4
    /// \endverbatim
    /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
    /// each level recursively, accumulating the score. It starts from matching
    /// the additions at level 0, then moves on to the loads (level 1). The
    /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
    /// {B[0],B[1]} match with LookAheadHeuristics::ScoreConsecutiveLoads, while
    /// {A[0],C[0]} has a score of LookAheadHeuristics::ScoreFail.
    /// Please note that the order of the operands does not matter, as we
    /// evaluate the score of all profitable combinations of operands. In
    /// other words the score of G1 and G4 is the same as G1 and G2. This
    /// heuristic is based on ideas described in:
    ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
    ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
    ///   Luís F. W. Góes
    int getScoreAtLevelRec(Value *LHS, Value *RHS, Instruction *U1,
                           Instruction *U2, int CurrLevel,
                           ArrayRef<Value *> MainAltOps) const {

      // Get the shallow score of V1 and V2.
      int ShallowScoreAtThisLevel =
          getShallowScore(LHS, RHS, U1, U2, MainAltOps);

      // If reached MaxLevel,
      //  or if V1 and V2 are not instructions,
      //  or if they are SPLAT,
      //  or if they are not consecutive,
      //  or if profitable to vectorize loads or extractelements, early return
      //  the current cost.
      auto *I1 = dyn_cast<Instruction>(LHS);
      auto *I2 = dyn_cast<Instruction>(RHS);
      if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
          ShallowScoreAtThisLevel == LookAheadHeuristics::ScoreFail ||
          (((isa<LoadInst>(I1) && isa<LoadInst>(I2)) ||
            (I1->getNumOperands() > 2 && I2->getNumOperands() > 2) ||
            (isa<ExtractElementInst>(I1) && isa<ExtractElementInst>(I2))) &&
           ShallowScoreAtThisLevel))
        return ShallowScoreAtThisLevel;
      assert(I1 && I2 && "Should have early exited.");

      // Contains the I2 operand indexes that got matched with I1 operands.
      SmallSet<unsigned, 4> Op2Used;

      // Recursion towards the operands of I1 and I2. We are trying all
      // possible operand pairs, and keeping track of the best score.
      for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
           OpIdx1 != NumOperands1; ++OpIdx1) {
        // Try to pair operand OpIdx1 of I1 with the best operand of I2.
        int MaxTmpScore = 0;
        unsigned MaxOpIdx2 = 0;
        bool FoundBest = false;
        // If I2 is commutative try all combinations.
        unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
        unsigned ToIdx = isCommutative(I2)
                             ? I2->getNumOperands()
                             : std::min(I2->getNumOperands(), OpIdx1 + 1);
        assert(FromIdx <= ToIdx && "Bad index");
        for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
          // Skip operands already paired with OpIdx1.
          if (Op2Used.count(OpIdx2))
            continue;
          // Recursively calculate the cost at each level.
          int TmpScore =
              getScoreAtLevelRec(I1->getOperand(OpIdx1), I2->getOperand(OpIdx2),
                                 I1, I2, CurrLevel + 1, None);
          // Look for the best score.
          if (TmpScore > LookAheadHeuristics::ScoreFail &&
              TmpScore > MaxTmpScore) {
            MaxTmpScore = TmpScore;
            MaxOpIdx2 = OpIdx2;
            FoundBest = true;
          }
        }
        if (FoundBest) {
          // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
          Op2Used.insert(MaxOpIdx2);
          ShallowScoreAtThisLevel += MaxTmpScore;
        }
      }
      return ShallowScoreAtThisLevel;
    }
  };

  /// A helper data structure to hold the operands of a vector of instructions.
  /// This supports a fixed vector length for all operand vectors.
  class VLOperands {
    /// For each operand we need (i) the value, and (ii) the opcode that it
    /// would be attached to if the expression was in a left-linearized form.
    /// This is required to avoid illegal operand reordering.
    /// For example:
    /// \verbatim
    ///                         0 Op1
    ///                         |/
    /// Op1 Op2   Linearized    + Op2
    ///   \ /     ---------->   |/
    ///    -                    -
    ///
    /// Op1 - Op2            (0 + Op1) - Op2
    /// \endverbatim
    ///
    /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
    ///
    /// Another way to think of this is to track all the operations across the
    /// path from the operand all the way to the root of the tree and to
    /// calculate the operation that corresponds to this path. For example, the
    /// path from Op2 to the root crosses the RHS of the '-', therefore the
    /// corresponding operation is a '-' (which matches the one in the
    /// linearized tree, as shown above).
    ///
    /// For lack of a better term, we refer to this operation as Accumulated
    /// Path Operation (APO).
    struct OperandData {
      OperandData() = default;
      OperandData(Value *V, bool APO, bool IsUsed)
          : V(V), APO(APO), IsUsed(IsUsed) {}
      /// The operand value.
      Value *V = nullptr;
      /// TreeEntries only allow a single opcode, or an alternate sequence of
      /// them (e.g., +, -). Therefore, we can safely use a boolean value for
      /// the APO. It is set to 'true' if 'V' is attached to an inverse
      /// operation in the left-linearized form (e.g., Sub/Div), and 'false'
      /// otherwise (e.g., Add/Mul).
      bool APO = false;
      /// Helper data for the reordering function.
      bool IsUsed = false;
    };

    /// During operand reordering, we are trying to select the operand at the
    /// lane that matches best with the operand at the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For
    /// example, if the neighboring lane has a load, we need to look for a load
    /// that is accessing a consecutive address. These strategies are
    /// summarized in the 'ReorderingMode' enumerator.
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };

    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;

    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }

    /// Clears the used flag for all entries.
    void clearUsed() {
      for (unsigned OpIdx = 0, NumOperands = getNumOperands();
           OpIdx != NumOperands; ++OpIdx)
        for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
             ++Lane)
          OpsVec[OpIdx][Lane].IsUsed = false;
    }

    /// Swap the operand at \p OpIdx1 with the one at \p OpIdx2.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }

    /// \param Lane lane of the operands under analysis.
    /// \param OpIdx operand index in \p Lane lane we're looking for the best
    /// candidate for.
    /// \param Idx operand index of the current candidate value.
    /// \returns The additional score due to possible broadcasting of the
    /// elements in the lane. It is more profitable to have power-of-2 unique
    /// elements in the lane, as it will be vectorized with higher probability
    /// after removing duplicates. Currently the SLP vectorizer supports only
    /// vectorization of the power-of-2 number of unique scalars.
    int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
      Value *IdxLaneV = getData(Idx, Lane).V;
      if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V)
        return 0;
      SmallPtrSet<Value *, 4> Uniques;
      for (unsigned Ln = 0, E = getNumLanes(); Ln < E; ++Ln) {
        if (Ln == Lane)
          continue;
        Value *OpIdxLnV = getData(OpIdx, Ln).V;
        if (!isa<Instruction>(OpIdxLnV))
          return 0;
        Uniques.insert(OpIdxLnV);
      }
      int UniquesCount = Uniques.size();
      int UniquesCntWithIdxLaneV =
          Uniques.contains(IdxLaneV) ? UniquesCount : UniquesCount + 1;
      Value *OpIdxLaneV = getData(OpIdx, Lane).V;
      int UniquesCntWithOpIdxLaneV =
          Uniques.contains(OpIdxLaneV) ? UniquesCount : UniquesCount + 1;
      if (UniquesCntWithIdxLaneV == UniquesCntWithOpIdxLaneV)
        return 0;
      return (PowerOf2Ceil(UniquesCntWithOpIdxLaneV) -
              UniquesCntWithOpIdxLaneV) -
             (PowerOf2Ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV);
    }

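    // Worked example (editor's note, not from the upstream source): suppose
    // the other lanes of OpIdx hold {a, b, c} (3 uniques). A candidate equal
    // to a keeps the count at 3 (PowerOf2Ceil(3) - 3 = 1 wasted lane), while
    // the current value d would grow it to 4 (4 - 4 = 0 wasted lanes). The
    // score is then 0 - 1 = -1, penalizing the candidate that moves the
    // unique count further away from a power of 2.
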
1466  /// \param Lane lane of the operands under analysis.
1467  /// \param OpIdx operand index in lane \p Lane for which we are looking
1468  /// for the best candidate.
1469  /// \param Idx operand index of the current candidate value.
1470  /// \returns The additional score for the scalar whose users are all
1471  /// vectorized.
1472  int getExternalUseScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
1473  Value *IdxLaneV = getData(Idx, Lane).V;
1474  Value *OpIdxLaneV = getData(OpIdx, Lane).V;
1475  // Do not care about the number of uses for vector-like instructions
1476  // (extractelement/extractvalue with constant indices); they are extracts
1477  // themselves and are already externally used. Vectorization of such
1478  // instructions does not add an extra extractelement instruction, it may
1479  // only remove one.
1480  if (isVectorLikeInstWithConstOps(IdxLaneV) &&
1481  isVectorLikeInstWithConstOps(OpIdxLaneV))
1482  return LookAheadHeuristics::ScoreAllUserVectorized;
1483  auto *IdxLaneI = dyn_cast<Instruction>(IdxLaneV);
1484  if (!IdxLaneI || !isa<Instruction>(OpIdxLaneV))
1485  return 0;
1486  return R.areAllUsersVectorized(IdxLaneI, None)
1487  ? LookAheadHeuristics::ScoreAllUserVectorized
1488  : 0;
1489  }
1490 
1491  /// Score scaling factor for fully compatible instructions but with
1492  /// different number of external uses. Allows better selection of the
1493  /// instructions with fewer external uses.
1494  static const int ScoreScaleFactor = 10;
1495 
1496  /// \Returns the look-ahead score, which tells us how much the sub-trees
1497  /// rooted at \p LHS and \p RHS match: the more they match, the higher the
1498  /// score. This helps break ties in an informed way when we cannot decide on
1499  /// the order of the operands by just considering the immediate
1500  /// predecessors.
1501  int getLookAheadScore(Value *LHS, Value *RHS, ArrayRef<Value *> MainAltOps,
1502  int Lane, unsigned OpIdx, unsigned Idx,
1503  bool &IsUsed) {
1504  LookAheadHeuristics LookAhead(DL, SE, R, getNumLanes(),
1505  LookAheadMaxDepth);
1506  // Keep track of the instruction stack as we recurse into the operands
1507  // during the look-ahead score exploration.
1508  int Score =
1509  LookAhead.getScoreAtLevelRec(LHS, RHS, /*U1=*/nullptr, /*U2=*/nullptr,
1510  /*CurrLevel=*/1, MainAltOps);
1511  if (Score) {
1512  int SplatScore = getSplatScore(Lane, OpIdx, Idx);
1513  if (Score <= -SplatScore) {
1514  // Set the minimum score for splat-like sequence to avoid setting
1515  // failed state.
1516  Score = 1;
1517  } else {
1518  Score += SplatScore;
1519  // Scale score to see the difference between different operands
1520  // and similar operands but all vectorized/not all vectorized
1521  // uses. It does not affect actual selection of the best
1522  // compatible operand in general, just allows selecting the
1523  // operand with all vectorized uses.
1524  Score *= ScoreScaleFactor;
1525  Score += getExternalUseScore(Lane, OpIdx, Idx);
1526  IsUsed = true;
1527  }
1528  }
1529  return Score;
1530  }
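      // Numeric sketch (assumed values): if the recursive score is 4 and
      // SplatScore is 1, then 4 > -1 and the final score is
      // (4 + 1) * ScoreScaleFactor == 50 plus the external-use bonus. The
      // scaling keeps the base score dominant, so the external-use score only
      // breaks ties between otherwise equally compatible operands.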
1531 
1532  /// Best defined scores per lane between the passes. Used to choose the
1533  /// best operand (with the highest score) between the passes.
1534  /// The key - {Operand Index, Lane}.
1535  /// The value - the best score between the passes for the lane and the
1536  /// operand.
1537  SmallDenseMap<std::pair<unsigned, unsigned>, unsigned, 8>
1538  BestScoresPerLanes;
1539 
1540  // Search all operands in Ops[*][Lane] for the one that matches best
1541  // Ops[OpIdx][LastLane] and return its operand index.
1542  // If no good match can be found, return None.
1543  Optional<unsigned> getBestOperand(unsigned OpIdx, int Lane, int LastLane,
1544  ArrayRef<ReorderingMode> ReorderingModes,
1545  ArrayRef<Value *> MainAltOps) {
1546  unsigned NumOperands = getNumOperands();
1547 
1548  // The operand of the previous lane at OpIdx.
1549  Value *OpLastLane = getData(OpIdx, LastLane).V;
1550 
1551  // Our strategy mode for OpIdx.
1552  ReorderingMode RMode = ReorderingModes[OpIdx];
1553  if (RMode == ReorderingMode::Failed)
1554  return None;
1555 
1556  // The linearized opcode of the operand at OpIdx, Lane.
1557  bool OpIdxAPO = getData(OpIdx, Lane).APO;
1558 
1559  // The best operand index and its score.
1560  // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
1561  // are using the score to differentiate between the two.
1562  struct BestOpData {
1563  Optional<unsigned> Idx = None;
1564  unsigned Score = 0;
1565  } BestOp;
1566  BestOp.Score =
1567  BestScoresPerLanes.try_emplace(std::make_pair(OpIdx, Lane), 0)
1568  .first->second;
1569 
1570  // Track if the operand must be marked as used. If the operand is set to
1571  // Score 1 explicitly (because of non-power-of-2 unique scalars), we may
1572  // want to reestimate the operands again on the following iterations.
1573  bool IsUsed =
1574  RMode == ReorderingMode::Splat || RMode == ReorderingMode::Constant;
1575  // Iterate through all unused operands and look for the best.
1576  for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
1577  // Get the operand at Idx and Lane.
1578  OperandData &OpData = getData(Idx, Lane);
1579  Value *Op = OpData.V;
1580  bool OpAPO = OpData.APO;
1581 
1582  // Skip already selected operands.
1583  if (OpData.IsUsed)
1584  continue;
1585 
1586  // Skip if we are trying to move the operand to a position with a
1587  // different opcode in the linearized tree form. This would break the
1588  // semantics.
1589  if (OpAPO != OpIdxAPO)
1590  continue;
1591 
1592  // Look for an operand that matches the current mode.
1593  switch (RMode) {
1594  case ReorderingMode::Load:
1595  case ReorderingMode::Constant:
1596  case ReorderingMode::Opcode: {
1597  bool LeftToRight = Lane > LastLane;
1598  Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
1599  Value *OpRight = (LeftToRight) ? Op : OpLastLane;
1600  int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane,
1601  OpIdx, Idx, IsUsed);
1602  if (Score > static_cast<int>(BestOp.Score)) {
1603  BestOp.Idx = Idx;
1604  BestOp.Score = Score;
1605  BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = Score;
1606  }
1607  break;
1608  }
1609  case ReorderingMode::Splat:
1610  if (Op == OpLastLane)
1611  BestOp.Idx = Idx;
1612  break;
1613  case ReorderingMode::Failed:
1614  llvm_unreachable("Not expected Failed reordering mode.");
1615  }
1616  }
1617 
1618  if (BestOp.Idx) {
1619  getData(*BestOp.Idx, Lane).IsUsed = IsUsed;
1620  return BestOp.Idx;
1621  }
1622  // If we could not find a good match return None.
1623  return None;
1624  }
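      // Note: in Splat mode only an operand identical to OpLastLane is
      // accepted (no scoring), while the Load/Constant/Opcode modes rank
      // candidates by look-ahead score; the best score seen for a given
      // {OpIdx, Lane} persists in BestScoresPerLanes across both reordering
      // passes.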
1625 
1626  /// Helper for reorderOperandVecs.
1627  /// \returns the lane that we should start reordering from. This is the one
1628  /// which has the least number of operands that can freely move about, or is
1629  /// less profitable because it already has the most optimal set of operands.
1630  unsigned getBestLaneToStartReordering() const {
1631  unsigned Min = UINT_MAX;
1632  unsigned SameOpNumber = 0;
1633  // std::pair<unsigned, unsigned> is used to implement a simple voting
1634  // algorithm and choose the lane with the least number of operands that
1635  // can freely move about, or that is less profitable because it already
1636  // has the most optimal set of operands. The first unsigned is a counter
1637  // for voting, the second unsigned is the counter of lanes with
1638  // instructions with same/alternate opcodes and same parent basic block.
1639  MapVector<unsigned, std::pair<unsigned, unsigned>> HashMap;
1640  // Try to be closer to the original results, if we have multiple lanes
1641  // with same cost. If 2 lanes have the same cost, use the one with the
1642  // lowest index.
1643  for (int I = getNumLanes(); I > 0; --I) {
1644  unsigned Lane = I - 1;
1645  OperandsOrderData NumFreeOpsHash =
1646  getMaxNumOperandsThatCanBeReordered(Lane);
1647  // Compare the number of operands that can move and choose the one with
1648  // the least number.
1649  if (NumFreeOpsHash.NumOfAPOs < Min) {
1650  Min = NumFreeOpsHash.NumOfAPOs;
1651  SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
1652  HashMap.clear();
1653  HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
1654  } else if (NumFreeOpsHash.NumOfAPOs == Min &&
1655  NumFreeOpsHash.NumOpsWithSameOpcodeParent < SameOpNumber) {
1656  // Select the most optimal lane in terms of number of operands that
1657  // should be moved around.
1658  SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
1659  HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
1660  } else if (NumFreeOpsHash.NumOfAPOs == Min &&
1661  NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) {
1662  auto It = HashMap.find(NumFreeOpsHash.Hash);
1663  if (It == HashMap.end())
1664  HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
1665  else
1666  ++It->second.first;
1667  }
1668  }
1669  // Select the lane with the minimum counter.
1670  unsigned BestLane = 0;
1671  unsigned CntMin = UINT_MAX;
1672  for (const auto &Data : reverse(HashMap)) {
1673  if (Data.second.first < CntMin) {
1674  CntMin = Data.second.first;
1675  BestLane = Data.second.second;
1676  }
1677  }
1678  return BestLane;
1679  }
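      // Illustration: when several lanes tie on NumOfAPOs and
      // NumOpsWithSameOpcodeParent, each distinct operand pattern (Hash)
      // accumulates a vote counter, and the loop above returns the lane of
      // the pattern with the minimum counter, i.e. the rarest ordering is
      // used as the starting point for reordering.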
1680 
1681  /// Data structure that helps to reorder operands.
1682  struct OperandsOrderData {
1683  /// The best number of operands with the same APOs, which can be
1684  /// reordered.
1685  unsigned NumOfAPOs = UINT_MAX;
1686  /// Number of operands with the same/alternate instruction opcode and
1687  /// parent.
1688  unsigned NumOpsWithSameOpcodeParent = 0;
1689  /// Hash for the actual operands ordering.
1690  /// Used to count operands, actually their position id and opcode
1691  /// value. It is used in the voting mechanism to find the lane with the
1692  /// least number of operands that can freely move about, or that is less
1693  /// profitable because it already has the most optimal set of operands.
1694  /// Could be replaced with a SmallVector<unsigned>, but the hash code is
1695  /// faster and requires less memory.
1696  unsigned Hash = 0;
1697  };
1698  /// \returns the maximum number of operands that are allowed to be reordered
1699  /// for \p Lane and the number of compatible instructions (with the same
1700  /// parent/opcode). This is used as a heuristic for selecting the first lane
1701  /// to start operand reordering.
1702  OperandsOrderData getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
1703  unsigned CntTrue = 0;
1704  unsigned NumOperands = getNumOperands();
1705  // Operands with the same APO can be reordered. We therefore need to count
1706  // how many of them we have for each APO, like this: Cnt[APO] = x.
1707  // Since we only have two APOs, namely true and false, we can avoid using
1708  // a map. Instead we can simply count the number of operands that
1709  // correspond to one of them (in this case the 'true' APO), and calculate
1710  // the other by subtracting it from the total number of operands.
1711  // Operands with the same instruction opcode and parent are more
1712  // profitable since we don't need to move them in many cases, with a high
1713  // probability such lane already can be vectorized effectively.
1714  bool AllUndefs = true;
1715  unsigned NumOpsWithSameOpcodeParent = 0;
1716  Instruction *OpcodeI = nullptr;
1717  BasicBlock *Parent = nullptr;
1718  unsigned Hash = 0;
1719  for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1720  const OperandData &OpData = getData(OpIdx, Lane);
1721  if (OpData.APO)
1722  ++CntTrue;
1723  // Use Boyer-Moore majority voting for finding the majority opcode and
1724  // the number of times it occurs.
1725  if (auto *I = dyn_cast<Instruction>(OpData.V)) {
1726  if (!OpcodeI || !getSameOpcode({OpcodeI, I}).getOpcode() ||
1727  I->getParent() != Parent) {
1728  if (NumOpsWithSameOpcodeParent == 0) {
1729  NumOpsWithSameOpcodeParent = 1;
1730  OpcodeI = I;
1731  Parent = I->getParent();
1732  } else {
1733  --NumOpsWithSameOpcodeParent;
1734  }
1735  } else {
1736  ++NumOpsWithSameOpcodeParent;
1737  }
1738  }
1739  Hash = hash_combine(
1740  Hash, hash_value((OpIdx + 1) * (OpData.V->getValueID() + 1)));
1741  AllUndefs = AllUndefs && isa<UndefValue>(OpData.V);
1742  }
1743  if (AllUndefs)
1744  return {};
1745  OperandsOrderData Data;
1746  Data.NumOfAPOs = std::max(CntTrue, NumOperands - CntTrue);
1747  Data.NumOpsWithSameOpcodeParent = NumOpsWithSameOpcodeParent;
1748  Data.Hash = Hash;
1749  return Data;
1750  }
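      // Majority-vote trace (illustrative): for a lane whose four operands
      // are defined by instructions with opcodes {mul, mul, load, mul} in one
      // block, the counter evolves 1, 2, 1, 2, so 'mul' wins the vote and
      // NumOpsWithSameOpcodeParent == 2.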
1751 
1752  /// Go through the instructions in VL and append their operands.
1753  void appendOperandsOfVL(ArrayRef<Value *> VL) {
1754  assert(!VL.empty() && "Bad VL");
1755  assert((empty() || VL.size() == getNumLanes()) &&
1756  "Expected same number of lanes");
1757  assert(isa<Instruction>(VL[0]) && "Expected instruction");
1758  unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
1759  OpsVec.resize(NumOperands);
1760  unsigned NumLanes = VL.size();
1761  for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1762  OpsVec[OpIdx].resize(NumLanes);
1763  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1764  assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
1765  // Our tree has just 3 nodes: the root and two operands.
1766  // It is therefore trivial to get the APO. We only need to check the
1767  // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
1768  // RHS operand. The LHS operand of both add and sub is never attached
1769  // to an inverse operation in the linearized form, therefore its APO
1770  // is false. The RHS is true only if VL[Lane] is an inverse operation.
1771 
1772  // Since operand reordering is performed on groups of commutative
1773  // operations or alternating sequences (e.g., +, -), we can safely
1774  // tell the inverse operations by checking commutativity.
1775  bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
1776  bool APO = (OpIdx == 0) ? false : IsInverseOperation;
1777  OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
1778  APO, false};
1779  }
1780  }
1781  }
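      // Example: for VL = {a0 + b0, a1 - b1} this produces
      //   OpsVec[0] = {a0 (APO false), a1 (APO false)}  // LHS, never inverted
      //   OpsVec[1] = {b0 (APO false), b1 (APO true)}   // RHS of '-' is inverted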
1782 
1783  /// \returns the number of operands.
1784  unsigned getNumOperands() const { return OpsVec.size(); }
1785 
1786  /// \returns the number of lanes.
1787  unsigned getNumLanes() const { return OpsVec[0].size(); }
1788 
1789  /// \returns the operand value at \p OpIdx and \p Lane.
1790  Value *getValue(unsigned OpIdx, unsigned Lane) const {
1791  return getData(OpIdx, Lane).V;
1792  }
1793 
1794  /// \returns true if the data structure is empty.
1795  bool empty() const { return OpsVec.empty(); }
1796 
1797  /// Clears the data.
1798  void clear() { OpsVec.clear(); }
1799 
1800  /// \Returns true if there are enough operands identical to \p Op to fill
1801  /// the whole vector.
1802  /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
1803  bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
1804  bool OpAPO = getData(OpIdx, Lane).APO;
1805  for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
1806  if (Ln == Lane)
1807  continue;
1808  // This is set to true if we found a candidate for broadcast at Lane.
1809  bool FoundCandidate = false;
1810  for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
1811  OperandData &Data = getData(OpI, Ln);
1812  if (Data.APO != OpAPO || Data.IsUsed)
1813  continue;
1814  if (Data.V == Op) {
1815  FoundCandidate = true;
1816  Data.IsUsed = true;
1817  break;
1818  }
1819  }
1820  if (!FoundCandidate)
1821  return false;
1822  }
1823  return true;
1824  }
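      // E.g., for three lanes with operands {a, b}, {c, a} and {a, d} (all
      // APOs false), shouldBroadcast(a, /*OpIdx=*/0, /*Lane=*/0) returns true:
      // every other lane still holds an unused 'a', so a splat of 'a' can
      // fill the whole vector.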
1825 
1826  public:
1827  /// Initialize with all the operands of the instruction vector \p RootVL.
1828  VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
1829  ScalarEvolution &SE, const BoUpSLP &R)
1830  : DL(DL), SE(SE), R(R) {
1831  // Append all the operands of RootVL.
1832  appendOperandsOfVL(RootVL);
1833  }
1834 
1835  /// \Returns a value vector with the operands across all lanes for the
1836  /// operand at \p OpIdx.
1837  ValueList getVL(unsigned OpIdx) const {
1838  ValueList OpVL(OpsVec[OpIdx].size());
1839  assert(OpsVec[OpIdx].size() == getNumLanes() &&
1840  "Expected same num of lanes across all operands");
1841  for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
1842  OpVL[Lane] = OpsVec[OpIdx][Lane].V;
1843  return OpVL;
1844  }
1845 
1846  // Performs operand reordering for 2 or more operands.
1847  // The original operands are in OpsVec[OpIdx][Lane]; they are reordered
1848  // in place, so the result is also found in OpsVec[OpIdx][Lane].
1849  void reorder() {
1850  unsigned NumOperands = getNumOperands();
1851  unsigned NumLanes = getNumLanes();
1852  // Each operand has its own mode. We are using this mode to help us select
1853  // the instructions for each lane, so that they match best with the ones
1854  // we have selected so far.
1855  SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
1856 
1857  // This is a greedy single-pass algorithm. We are going over each lane
1858  // once and deciding on the best order right away with no back-tracking.
1859  // However, in order to increase its effectiveness, we start with the lane
1860  // that has operands that can move the least. For example, given the
1861  // following lanes:
1862  // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd
1863  // Lane 1 : A[1] = C[1] - B[1] // Visited 1st
1864  // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd
1865  // Lane 3 : A[3] = C[3] - B[3] // Visited 4th
1866  // we will start at Lane 1, since the operands of the subtraction cannot
1867  // be reordered. Then we will visit the rest of the lanes in a circular
1868  // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3.
1869 
1870  // Find the first lane that we will start our search from.
1871  unsigned FirstLane = getBestLaneToStartReordering();
1872 
1873  // Initialize the modes.
1874  for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1875  Value *OpLane0 = getValue(OpIdx, FirstLane);
1876  // Keep track if we have instructions with all the same opcode on one
1877  // side.
1878  if (isa<LoadInst>(OpLane0))
1879  ReorderingModes[OpIdx] = ReorderingMode::Load;
1880  else if (isa<Instruction>(OpLane0)) {
1881  // Check if OpLane0 should be broadcast.
1882  if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
1883  ReorderingModes[OpIdx] = ReorderingMode::Splat;
1884  else
1885  ReorderingModes[OpIdx] = ReorderingMode::Opcode;
1886  }
1887  else if (isa<Constant>(OpLane0))
1888  ReorderingModes[OpIdx] = ReorderingMode::Constant;
1889  else if (isa<Argument>(OpLane0))
1890  // Our best hope is a Splat. It may save some cost in some cases.
1891  ReorderingModes[OpIdx] = ReorderingMode::Splat;
1892  else
1893  // NOTE: This should be unreachable.
1894  ReorderingModes[OpIdx] = ReorderingMode::Failed;
1895  }
1896 
1897  // Check that we don't have the same operands. No need to reorder if the
1898  // operands are just a perfect diamond or shuffled diamond match. The only
1899  // exceptions (for now) are possible broadcasts or a non-power-of-2 number
1900  // of scalars.
1901  auto &&SkipReordering = [this]() {
1902  SmallPtrSet<Value *, 4> UniqueValues;
1903  ArrayRef<OperandData> Op0 = OpsVec.front();
1904  for (const OperandData &Data : Op0)
1905  UniqueValues.insert(Data.V);
1906  for (ArrayRef<OperandData> Op : drop_begin(OpsVec, 1)) {
1907  if (any_of(Op, [&UniqueValues](const OperandData &Data) {
1908  return !UniqueValues.contains(Data.V);
1909  }))
1910  return false;
1911  }
1912  // TODO: Check if we can remove a check for non-power-2 number of
1913  // scalars after full support of non-power-2 vectorization.
1914  return UniqueValues.size() != 2 && isPowerOf2_32(UniqueValues.size());
1915  };
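      // E.g., with Op0 = {a, b, c, d} and Op1 = {b, a, d, c}, every value of
      // Op1 already occurs in Op0's set and there are 4 unique values (a
      // power of 2 other than 2), so this shuffled diamond match skips
      // reordering entirely.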
1916 
1917  // If the initial strategy fails for any of the operand indexes, then we
1918  // perform reordering again in a second pass. This helps avoid assigning
1919  // high priority to the failed strategy, and should improve reordering for
1920  // the non-failed operand indexes.
1921  for (int Pass = 0; Pass != 2; ++Pass) {
1922  // Check if there is no need to reorder the operands because they are a
1923  // perfect or shuffled diamond match.
1924  // We need to do this to avoid extra external use cost counting for
1925  // shuffled matches, which may cause regressions.
1926  if (SkipReordering())
1927  break;
1928  // Skip the second pass if the first pass did not fail.
1929  bool StrategyFailed = false;
1930  // Mark all operand data as free to use.
1931  clearUsed();
1932  // We keep the original operand order for the FirstLane, so reorder the
1933  // rest of the lanes. We are visiting the nodes in a circular fashion,
1934  // using FirstLane as the center point and increasing the radius
1935  // distance.
1936  SmallVector<SmallVector<Value *, 2>> MainAltOps(NumOperands);
1937  for (unsigned I = 0; I < NumOperands; ++I)
1938  MainAltOps[I].push_back(getData(I, FirstLane).V);
1939 
1940  for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
1941  // Visit the lane on the right and then the lane on the left.
1942  for (int Direction : {+1, -1}) {
1943  int Lane = FirstLane + Direction * Distance;
1944  if (Lane < 0 || Lane >= (int)NumLanes)
1945  continue;
1946  int LastLane = Lane - Direction;
1947  assert(LastLane >= 0 && LastLane < (int)NumLanes &&
1948  "Out of bounds");
1949  // Look for a good match for each operand.
1950  for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1951  // Search for the operand that best matches the one at OpIdx in LastLane.
1952  Optional<unsigned> BestIdx = getBestOperand(
1953  OpIdx, Lane, LastLane, ReorderingModes, MainAltOps[OpIdx]);
1954  // By not selecting a value, we allow the operands that follow to
1955  // select a better matching value. We will get a non-null value in
1956  // the next run of getBestOperand().
1957  if (BestIdx) {
1958  // Swap the current operand with the one returned by
1959  // getBestOperand().
1960  swap(OpIdx, *BestIdx, Lane);
1961  } else {
1962  // We failed to find a best operand, set mode to 'Failed'.
1963  ReorderingModes[OpIdx] = ReorderingMode::Failed;
1964  // Enable the second pass.
1965  StrategyFailed = true;
1966  }
1967  // Try to get the alternate opcode and follow it during analysis.
1968  if (MainAltOps[OpIdx].size() != 2) {
1969  OperandData &AltOp = getData(OpIdx, Lane);
1970  InstructionsState OpS =
1971  getSameOpcode({MainAltOps[OpIdx].front(), AltOp.V});
1972  if (OpS.getOpcode() && OpS.isAltShuffle())
1973  MainAltOps[OpIdx].push_back(AltOp.V);
1974  }
1975  }
1976  }
1977  }
1978  // Skip second pass if the strategy did not fail.
1979  if (!StrategyFailed)
1980  break;
1981  }
1982  }
1983 
1984 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1985  LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
1986  switch (RMode) {
1987  case ReorderingMode::Load:
1988  return "Load";
1989  case ReorderingMode::Opcode:
1990  return "Opcode";
1991  case ReorderingMode::Constant:
1992  return "Constant";
1993  case ReorderingMode::Splat:
1994  return "Splat";
1995  case ReorderingMode::Failed:
1996  return "Failed";
1997  }
1998  llvm_unreachable("Unimplemented Reordering Type");
1999  }
2000 
2001  LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
2002  raw_ostream &OS) {
2003  return OS << getModeStr(RMode);
2004  }
2005 
2006  /// Debug print.
2007  LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
2008  printMode(RMode, dbgs());
2009  }
2010 
2011  friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
2012  return printMode(RMode, OS);
2013  }
2014 
2015  LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
2016  const unsigned Indent = 2;
2017  unsigned Cnt = 0;
2018  for (const OperandDataVec &OpDataVec : OpsVec) {
2019  OS << "Operand " << Cnt++ << "\n";
2020  for (const OperandData &OpData : OpDataVec) {
2021  OS.indent(Indent) << "{";
2022  if (Value *V = OpData.V)
2023  OS << *V;
2024  else
2025  OS << "null";
2026  OS << ", APO:" << OpData.APO << "}\n";
2027  }
2028  OS << "\n";
2029  }
2030  return OS;
2031  }
2032 
2033  /// Debug print.
2034  LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
2035 #endif
2036  };
2037 
2038  /// Evaluate each pair in \p Candidates and return the index into \p
2039  /// Candidates of the pair which has the highest score, deemed to have the
2040  /// best chance to form the root of a profitable tree to vectorize. Return
2041  /// None if no candidate scored above LookAheadHeuristics::ScoreFail.
2042  /// \param Limit Lower limit of the score, considered to be good enough.
2043  Optional<int>
2044  findBestRootPair(ArrayRef<std::pair<Value *, Value *>> Candidates,
2045  int Limit = LookAheadHeuristics::ScoreFail) {
2046  LookAheadHeuristics LookAhead(*DL, *SE, *this, /*NumLanes=*/2,
2047  RootLookAheadMaxDepth);
2048  int BestScore = Limit;
2049  Optional<int> Index;
2050  for (int I : seq<int>(0, Candidates.size())) {
2051  int Score = LookAhead.getScoreAtLevelRec(Candidates[I].first,
2052  Candidates[I].second,
2053  /*U1=*/nullptr, /*U2=*/nullptr,
2054  /*Level=*/1, None);
2055  if (Score > BestScore) {
2056  BestScore = Score;
2057  Index = I;
2058  }
2059  }
2060  return Index;
2061  }
2062 
2063  /// Checks if the instruction is marked for deletion.
2064  bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }
2065 
2066  /// Removes an instruction from its block and eventually deletes it.
2067  /// It's like Instruction::eraseFromParent() except that the actual deletion
2068  /// is delayed until BoUpSLP is destructed.
2069  void eraseInstruction(Instruction *I) {
2070  DeletedInstructions.insert(I);
2071  }
2072 
2073  /// Checks if the instruction was already analyzed for being possible
2074  /// reduction root.
2075  bool isAnalyzedReductionRoot(Instruction *I) const {
2076  return AnalyzedReductionsRoots.count(I);
2077  }
2078  /// Register given instruction as already analyzed for being possible
2079  /// reduction root.
2080  void analyzedReductionRoot(Instruction *I) {
2081  AnalyzedReductionsRoots.insert(I);
2082  }
2083  /// Checks if the provided list of reduced values was checked already for
2084  /// vectorization.
2085  bool areAnalyzedReductionVals(ArrayRef<Value *> VL) {
2086  return AnalyzedReductionVals.contains(hash_value(VL));
2087  }
2088  /// Adds the list of reduced values to list of already checked values for the
2089  /// vectorization.
2090  void analyzedReductionVals(ArrayRef<Value *> VL) {
2091  AnalyzedReductionVals.insert(hash_value(VL));
2092  }
2093  /// Clear the list of the analyzed reduction root instructions.
2094  void clearReductionData() {
2095  AnalyzedReductionsRoots.clear();
2096  AnalyzedReductionVals.clear();
2097  }
2098  /// Checks if the given value is gathered in one of the nodes.
2099  bool isAnyGathered(const SmallDenseSet<Value *> &Vals) const {
2100  return any_of(MustGather, [&](Value *V) { return Vals.contains(V); });
2101  }
2102 
2103  ~BoUpSLP();
2104 
2105 private:
2106  /// Check if the operands on the edges \p Edges of the \p UserTE allow
2107  /// reordering (i.e. the operands can be reordered because they have only one
2108  /// user and are reorderable).
2109  /// \param ReorderableGathers List of all gather nodes that require reordering
2110  /// (e.g., gather of extractelements or partially vectorizable loads).
2111  /// \param GatherOps List of gather operand nodes for \p UserTE that require
2112  /// reordering, subset of \p NonVectorized.
2113  bool
2114  canReorderOperands(TreeEntry *UserTE,
2115  SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
2116  ArrayRef<TreeEntry *> ReorderableGathers,
2117  SmallVectorImpl<TreeEntry *> &GatherOps);
2118 
2119  /// Checks if the given \p TE is a gather node with clustered reused scalars
2120  /// and reorders it per given \p Mask.
2121  void reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const;
2122 
2123  /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2124  /// if any. If it is not vectorized (gather node), returns nullptr.
2125  TreeEntry *getVectorizedOperand(TreeEntry *UserTE, unsigned OpIdx) {
2126  ArrayRef<Value *> VL = UserTE->getOperand(OpIdx);
2127  TreeEntry *TE = nullptr;
2128  const auto *It = find_if(VL, [this, &TE](Value *V) {
2129  TE = getTreeEntry(V);
2130  return TE;
2131  });
2132  if (It != VL.end() && TE->isSame(VL))
2133  return TE;
2134  return nullptr;
2135  }
2136 
2137  /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2138  /// if any. If it is not vectorized (gather node), returns nullptr.
2139  const TreeEntry *getVectorizedOperand(const TreeEntry *UserTE,
2140  unsigned OpIdx) const {
2141  return const_cast<BoUpSLP *>(this)->getVectorizedOperand(
2142  const_cast<TreeEntry *>(UserTE), OpIdx);
2143  }
2144 
2145  /// Checks if all users of \p I are the part of the vectorization tree.
2146  bool areAllUsersVectorized(Instruction *I,
2147  ArrayRef<Value *> VectorizedVals) const;
2148 
2149  /// Return information about the vector formed for the specified index
2150  /// of a vector of (the same) instruction.
2151  TargetTransformInfo::OperandValueInfo getOperandInfo(ArrayRef<Value *> Ops,
2152  unsigned OpIdx);
2153 
2154  /// \returns the cost of the vectorizable entry.
2155  InstructionCost getEntryCost(const TreeEntry *E,
2156  ArrayRef<Value *> VectorizedVals);
2157 
2158  /// This is the recursive part of buildTree.
2159  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
2160  const EdgeInfo &EI);
2161 
2162  /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
2163  /// be vectorized to use the original vector (or aggregate "bitcast" to a
2164  /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
2165  /// returns false, setting \p CurrentOrder to either an empty vector or a
2166  /// non-identity permutation that allows reusing extract instructions.
2167  bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
2168  SmallVectorImpl<unsigned> &CurrentOrder) const;
2169 
2170  /// Vectorize a single entry in the tree.
2171  Value *vectorizeTree(TreeEntry *E);
2172 
2173  /// Vectorize a single entry in the tree, starting in \p VL.
2174  Value *vectorizeTree(ArrayRef<Value *> VL);
2175 
2176  /// Create a new vector from a list of scalar values. Produces a sequence
2177  /// which exploits values reused across lanes, and arranges the inserts
2178  /// for ease of later optimization.
2179  Value *createBuildVector(ArrayRef<Value *> VL);
2180 
2181  /// \returns the scalarization cost for this type. Scalarization in this
2182  /// context means the creation of vectors from a group of scalars. If \p
2183  /// NeedToShuffle is true, need to add a cost of reshuffling some of the
2184  /// vector elements.
2185  InstructionCost getGatherCost(FixedVectorType *Ty,
2186  const APInt &ShuffledIndices,
2187  bool NeedToShuffle) const;
2188 
2189  /// Returns the instruction in the bundle, which can be used as a base point
2190  /// for scheduling. Usually it is the last instruction in the bundle, except
2191  /// for the case when all operands are external (in this case, it is the first
2192  /// instruction in the list).
2193  Instruction &getLastInstructionInBundle(const TreeEntry *E);
2194 
2195  /// Checks if the gathered \p VL can be represented as shuffle(s) of previous
2196  /// tree entries.
2197  /// \returns ShuffleKind, if gathered values can be represented as shuffles of
2198  /// previous tree entries. \p Mask is filled with the shuffle mask.
2199  Optional<TargetTransformInfo::ShuffleKind>
2200  isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
2201  SmallVectorImpl<const TreeEntry *> &Entries);
2202 
2203  /// \returns the scalarization cost for this list of values. Assuming that
2204  /// this subtree gets vectorized, we may need to extract the values from the
2205  /// roots. This method calculates the cost of extracting the values.
2206  InstructionCost getGatherCost(ArrayRef<Value *> VL) const;
2207 
2208  /// Set the Builder insert point to one after the last instruction in
2209  /// the bundle
2210  void setInsertPointAfterBundle(const TreeEntry *E);
2211 
2212  /// \returns a vector from a collection of scalars in \p VL.
2213  Value *gather(ArrayRef<Value *> VL);
2214 
2215  /// \returns whether the VectorizableTree is fully vectorizable and will
2216  /// be beneficial even if the tree height is tiny.
2217  bool isFullyVectorizableTinyTree(bool ForReduction) const;
2218 
2219  /// Reorder commutative or alt operands to get better probability of
2220  /// generating vectorized code.
2221  static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
2222  SmallVectorImpl<Value *> &Left,
2223  SmallVectorImpl<Value *> &Right,
2224  const DataLayout &DL,
2225  ScalarEvolution &SE,
2226  const BoUpSLP &R);
2227 
2228  /// Helper for `findExternalStoreUsersReorderIndices()`. It iterates over the
2229  /// users of \p TE and collects the stores. It returns the map from the store
2230  /// pointers to the collected stores.
2231  DenseMap<Value *, SmallVector<StoreInst *, 4>>
2232  collectUserStores(const BoUpSLP::TreeEntry *TE) const;
2233 
2234  /// Helper for `findExternalStoreUsersReorderIndices()`. It checks if the
2235  /// stores in \p StoresVec can form a vector instruction. If so it returns true
2236  /// and populates \p ReorderIndices with the shuffle indices of the stores
2237  /// when compared to the sorted vector.
2238  bool canFormVector(const SmallVector<StoreInst *, 4> &StoresVec,
2239  OrdersType &ReorderIndices) const;
2240 
2241  /// Iterates through the users of \p TE, looking for scalar stores that can be
2242  /// potentially vectorized in a future SLP-tree. If found, it keeps track of
2243  /// their order and builds an order index vector for each store bundle. It
2244  /// returns all these order vectors found.
2245  /// We run this after the tree has formed, otherwise we may come across user
2246  /// instructions that are not yet in the tree.
2247  SmallVector<OrdersType, 1>
2248  findExternalStoreUsersReorderIndices(TreeEntry *TE) const;
2249 
2250  struct TreeEntry {
2251  using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
2252  TreeEntry(VecTreeTy &Container) : Container(Container) {}
2253 
2254  /// \returns true if the scalars in VL are equal to this entry.
2255  bool isSame(ArrayRef<Value *> VL) const {
2256  auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) {
2257  if (Mask.size() != VL.size() && VL.size() == Scalars.size())
2258  return std::equal(VL.begin(), VL.end(), Scalars.begin());
2259  return VL.size() == Mask.size() &&
2260  std::equal(VL.begin(), VL.end(), Mask.begin(),
2261  [Scalars](Value *V, int Idx) {
2262  return (isa<UndefValue>(V) &&
2263  Idx == UndefMaskElem) ||
2264  (Idx != UndefMaskElem && V == Scalars[Idx]);
2265  });
2266  };
2267  if (!ReorderIndices.empty()) {
2268  // TODO: implement matching if the nodes are just reordered, still can
2269  // treat the vector as the same if the list of scalars matches VL
2270  // directly, without reordering.
2271  SmallVector<int> Mask;
2272  inversePermutation(ReorderIndices, Mask);
2273  if (VL.size() == Scalars.size())
2274  return IsSame(Scalars, Mask);
2275  if (VL.size() == ReuseShuffleIndices.size()) {
2276  ::addMask(Mask, ReuseShuffleIndices);
2277  return IsSame(Scalars, Mask);
2278  }
2279  return false;
2280  }
2281  return IsSame(Scalars, ReuseShuffleIndices);
2282  }
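      // E.g., Scalars = {a, b} with ReuseShuffleIndices = {0, 1, 0, 1} is
      // the same as VL = {a, b, a, b}: each VL element must equal
      // Scalars[Mask[i]], and an undef in VL is accepted where the mask
      // element is UndefMaskElem.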
2283 
2284  /// \returns true if current entry has same operands as \p TE.
2285  bool hasEqualOperands(const TreeEntry &TE) const {
2286  if (TE.getNumOperands() != getNumOperands())
2287  return false;
2288  SmallBitVector Used(getNumOperands());
2289  for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
2290  unsigned PrevCount = Used.count();
2291  for (unsigned K = 0; K < E; ++K) {
2292  if (Used.test(K))
2293  continue;
2294  if (getOperand(K) == TE.getOperand(I)) {
2295  Used.set(K);
2296  break;
2297  }
2298  }
2299  // Check if we actually found the matching operand.
2300  if (PrevCount == Used.count())
2301  return false;
2302  }
2303  return true;
2304  }
2305 
2306  /// \return Final vectorization factor for the node. Defined by the total
2307  /// number of vectorized scalars, including those used several times in the
2308  /// entry and counted in the \a ReuseShuffleIndices, if any.
2309  unsigned getVectorFactor() const {
2310  if (!ReuseShuffleIndices.empty())
2311  return ReuseShuffleIndices.size();
2312  return Scalars.size();
2313  };
2314 
2315  /// A vector of scalars.
2316  ValueList Scalars;
2317 
2318  /// The Scalars are vectorized into this value. It is initialized to Null.
2319  Value *VectorizedValue = nullptr;
2320 
2321  /// Do we need to gather this sequence or vectorize it
2322  /// (either with vector instruction or with scatter/gather
2323  /// intrinsics for store/load)?
2324  enum EntryState { Vectorize, ScatterVectorize, NeedToGather };
2325  EntryState State;
2326 
2327  /// Does this sequence require some shuffling?
2328  SmallVector<int, 4> ReuseShuffleIndices;
2329 
2330  /// Does this entry require reordering?
2331  SmallVector<unsigned, 4> ReorderIndices;
2332 
2333  /// Points back to the VectorizableTree.
2334  ///
2335  /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
2336  /// to be a pointer and needs to be able to initialize the child iterator.
2337  /// Thus we need a reference back to the container to translate the indices
2338  /// to entries.
2339  VecTreeTy &Container;
2340 
2341  /// The TreeEntry index containing the user of this entry. We can actually
2342  /// have multiple users so the data structure is not truly a tree.
2343  SmallVector<EdgeInfo, 1> UserTreeIndices;
2344 
2345  /// The index of this treeEntry in VectorizableTree.
2346  int Idx = -1;
2347 
2348  private:
2349  /// The operands of each instruction in each lane Operands[op_index][lane].
2350  /// Note: This helps avoid the replication of the code that performs the
2351  /// reordering of operands during buildTree_rec() and vectorizeTree().
2352  SmallVector<ValueList, 2> Operands;
2353 
2354  /// The main/alternate instruction.
2355  Instruction *MainOp = nullptr;
2356  Instruction *AltOp = nullptr;
2357 
2358  public:
2359  /// Set this bundle's \p OpIdx'th operand to \p OpVL.
2360  void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
2361  if (Operands.size() < OpIdx + 1)
2362  Operands.resize(OpIdx + 1);
2363  assert(Operands[OpIdx].empty() && "Already resized?");
2364  assert(OpVL.size() <= Scalars.size() &&
2365  "Number of operands is greater than the number of scalars.");
2366  Operands[OpIdx].resize(OpVL.size());
2367  copy(OpVL, Operands[OpIdx].begin());
2368  }
2369 
2370  /// Set the operands of this bundle in their original order.
2371  void setOperandsInOrder() {
2372  assert(Operands.empty() && "Already initialized?");
2373  auto *I0 = cast<Instruction>(Scalars[0]);
2374  Operands.resize(I0->getNumOperands());
2375  unsigned NumLanes = Scalars.size();
2376  for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands();
2377  OpIdx != NumOperands; ++OpIdx) {
2378  Operands[OpIdx].resize(NumLanes);
2379  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
2380  auto *I = cast<Instruction>(Scalars[Lane]);
2381  assert(I->getNumOperands() == NumOperands &&
2382  "Expected same number of operands");
2383  Operands[OpIdx][Lane] = I->getOperand(OpIdx);
2384  }
2385  }
2386  }
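      // E.g., for Scalars = {a0 + b0, a1 + b1} this transposes the scalar
      // operands into Operands[0] = {a0, a1} and Operands[1] = {b0, b1}:
      // one ValueList per operand index, spanning all lanes.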
2387 
2388  /// Reorders operands of the node to the given mask \p Mask.
2389  void reorderOperands(ArrayRef<int> Mask) {
2390  for (ValueList &Operand : Operands)
2391  reorderScalars(Operand, Mask);
2392  }
2393 
2394  /// \returns the \p OpIdx operand of this TreeEntry.
2395  ValueList &getOperand(unsigned OpIdx) {
2396  assert(OpIdx < Operands.size() && "Off bounds");
2397  return Operands[OpIdx];
2398  }
2399 
2400  /// \returns the \p OpIdx operand of this TreeEntry.
2401  ArrayRef<Value *> getOperand(unsigned OpIdx) const {
2402  assert(OpIdx < Operands.size() && "Off bounds");
2403  return Operands[OpIdx];
2404  }
2405 
2406  /// \returns the number of operands.
2407  unsigned getNumOperands() const { return Operands.size(); }
2408 
2409  /// \return the single \p OpIdx operand.
2410  Value *getSingleOperand(unsigned OpIdx) const {
2411  assert(OpIdx < Operands.size() && "Off bounds");
2412  assert(!Operands[OpIdx].empty() && "No operand available");
2413  return Operands[OpIdx][0];
2414  }
2415 
2416  /// Some of the instructions in the list have alternate opcodes.
2417  bool isAltShuffle() const { return MainOp != AltOp; }
2418 
2419  bool isOpcodeOrAlt(Instruction *I) const {
2420  unsigned CheckedOpcode = I->getOpcode();
2421  return (getOpcode() == CheckedOpcode ||
2422  getAltOpcode() == CheckedOpcode);
2423  }
2424 
2425  /// Chooses the correct key for scheduling data. If \p Op has the same (or
2426  /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is
2427  /// \p OpValue.
2428  Value *isOneOf(Value *Op) const {
2429  auto *I = dyn_cast<Instruction>(Op);
2430  if (I && isOpcodeOrAlt(I))
2431  return Op;
2432  return MainOp;
2433  }
2434 
2435  void setOperations(const InstructionsState &S) {
2436  MainOp = S.MainOp;
2437  AltOp = S.AltOp;
2438  }
2439 
2440  Instruction *getMainOp() const {
2441  return MainOp;
2442  }
2443 
2444  Instruction *getAltOp() const {
2445  return AltOp;
2446  }
2447 
2448  /// The main/alternate opcodes for the list of instructions.
2449  unsigned getOpcode() const {
2450  return MainOp ? MainOp->getOpcode() : 0;
2451  }
2452 
2453  unsigned getAltOpcode() const {
2454  return AltOp ? AltOp->getOpcode() : 0;
2455  }
2456 
2457  /// When the reorder/reuse shuffle indices are empty, just returns the
2458  /// position of \p V within the vector of Scalars; otherwise, remaps via them.
2459  int findLaneForValue(Value *V) const {
2460  unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V));
2461  assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
2462  if (!ReorderIndices.empty())
2463  FoundLane = ReorderIndices[FoundLane];
2464  assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
2465  if (!ReuseShuffleIndices.empty()) {
2466  FoundLane = std::distance(ReuseShuffleIndices.begin(),
2467  find(ReuseShuffleIndices, FoundLane));
2468  }
2469  return FoundLane;
2470  }
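      // E.g., with Scalars = {a, b, c, d} and ReorderIndices = {3, 2, 1, 0},
      // findLaneForValue(b) finds position 1 and remaps it to lane
      // ReorderIndices[1] == 2; with a non-empty ReuseShuffleIndices the
      // result is the position of that lane number within the reuse mask.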
2471 
2472 #ifndef NDEBUG
2473  /// Debug printer.
2474  LLVM_DUMP_METHOD void dump() const {
2475  dbgs() << Idx << ".\n";
2476  for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
2477  dbgs() << "Operand " << OpI << ":\n";
2478  for (const Value *V : Operands[OpI])
2479  dbgs().indent(2) << *V << "\n";
2480  }
2481  dbgs() << "Scalars: \n";
2482  for (Value *V : Scalars)
2483  dbgs().indent(2) << *V << "\n";
2484  dbgs() << "State: ";
2485  switch (State) {
2486  case Vectorize:
2487  dbgs() << "Vectorize\n";
2488  break;
2489  case ScatterVectorize:
2490  dbgs() << "ScatterVectorize\n";
2491  break;
2492  case NeedToGather:
2493  dbgs() << "NeedToGather\n";
2494  break;
2495  }
2496  dbgs() << "MainOp: ";
2497  if (MainOp)
2498  dbgs() << *MainOp << "\n";
2499  else
2500  dbgs() << "NULL\n";
2501  dbgs() << "AltOp: ";
2502  if (AltOp)
2503  dbgs() << *AltOp << "\n";
2504  else
2505  dbgs() << "NULL\n";
2506  dbgs() << "VectorizedValue: ";
2507  if (VectorizedValue)
2508  dbgs() << *VectorizedValue << "\n";
2509  else
2510  dbgs() << "NULL\n";
2511  dbgs() << "ReuseShuffleIndices: ";
2512  if (ReuseShuffleIndices.empty())
2513  dbgs() << "Empty";
2514  else
2515  for (int ReuseIdx : ReuseShuffleIndices)
2516  dbgs() << ReuseIdx << ", ";
2517  dbgs() << "\n";
2518  dbgs() << "ReorderIndices: ";
2519  for (unsigned ReorderIdx : ReorderIndices)
2520  dbgs() << ReorderIdx << ", ";
2521  dbgs() << "\n";
2522  dbgs() << "UserTreeIndices: ";
2523  for (const auto &EInfo : UserTreeIndices)
2524  dbgs() << EInfo << ", ";
2525  dbgs() << "\n";
2526  }
2527 #endif
2528  };
2529 
2530 #ifndef NDEBUG
2531  void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost,
2532  InstructionCost VecCost,
2533  InstructionCost ScalarCost) const {
2534  dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump();
2535  dbgs() << "SLP: Costs:\n";
2536  dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n";
2537  dbgs() << "SLP: VectorCost = " << VecCost << "\n";
2538  dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n";
2539  dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " <<
2540  ReuseShuffleCost + VecCost - ScalarCost << "\n";
2541  }
2542 #endif
2543 
2544  /// Create a new VectorizableTree entry.
2545  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle,
2546  const InstructionsState &S,
2547  const EdgeInfo &UserTreeIdx,
2548  ArrayRef<int> ReuseShuffleIndices = None,
2549  ArrayRef<unsigned> ReorderIndices = None) {
2550  TreeEntry::EntryState EntryState =
2551  Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather;
2552  return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx,
2553  ReuseShuffleIndices, ReorderIndices);
2554  }
2555 
2556  TreeEntry *newTreeEntry(ArrayRef<Value *> VL,
2557  TreeEntry::EntryState EntryState,
2558  Optional<ScheduleData *> Bundle,
2559  const InstructionsState &S,
2560  const EdgeInfo &UserTreeIdx,
2561  ArrayRef<int> ReuseShuffleIndices = None,
2562  ArrayRef<unsigned> ReorderIndices = None) {
2563  assert(((!Bundle && EntryState == TreeEntry::NeedToGather) ||
2564  (Bundle && EntryState != TreeEntry::NeedToGather)) &&
2565  "Need to vectorize gather entry?");
2566  VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree));
2567  TreeEntry *Last = VectorizableTree.back().get();
2568  Last->Idx = VectorizableTree.size() - 1;
2569  Last->State = EntryState;
2570  Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
2571  ReuseShuffleIndices.end());
2572  if (ReorderIndices.empty()) {
2573  Last->Scalars.assign(VL.begin(), VL.end());
2574  Last->setOperations(S);
2575  } else {
2576  // Reorder scalars and build final mask.
2577  Last->Scalars.assign(VL.size(), nullptr);
2578  transform(ReorderIndices, Last->Scalars.begin(),
2579  [VL](unsigned Idx) -> Value * {
2580  if (Idx >= VL.size())
2581  return UndefValue::get(VL.front()->getType());
2582  return VL[Idx];
2583  });
2584  InstructionsState S = getSameOpcode(Last->Scalars);
2585  Last->setOperations(S);
2586  Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end());
2587  }
2588  if (Last->State != TreeEntry::NeedToGather) {
2589  for (Value *V : VL) {
2590  assert(!getTreeEntry(V) && "Scalar already in tree!");
2591  ScalarToTreeEntry[V] = Last;
2592  }
2593  // Update the scheduler bundle to point to this TreeEntry.
2594  ScheduleData *BundleMember = *Bundle;
2595  assert((BundleMember || isa<PHINode>(S.MainOp) ||
2596  isVectorLikeInstWithConstOps(S.MainOp) ||
2597  doesNotNeedToSchedule(VL)) &&
2598  "Bundle and VL out of sync");
2599  if (BundleMember) {
2600  for (Value *V : VL) {
2601  if (doesNotNeedToBeScheduled(V))
2602  continue;
2603  assert(BundleMember && "Unexpected end of bundle.");
2604  BundleMember->TE = Last;
2605  BundleMember = BundleMember->NextInBundle;
2606  }
2607  }
2608  assert(!BundleMember && "Bundle and VL out of sync");
2609  } else {
2610  MustGather.insert(VL.begin(), VL.end());
2611  }
2612 
2613  if (UserTreeIdx.UserTE)
2614  Last->UserTreeIndices.push_back(UserTreeIdx);
2615 
2616  return Last;
2617  }
2618 
2619  /// -- Vectorization State --
2620  /// Holds all of the tree entries.
2621  TreeEntry::VecTreeTy VectorizableTree;
2622 
2623 #ifndef NDEBUG
2624  /// Debug printer.
2625  LLVM_DUMP_METHOD void dumpVectorizableTree() const {
2626  for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
2627  VectorizableTree[Id]->dump();
2628  dbgs() << "\n";
2629  }
2630  }
2631 #endif
2632 
2633  TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); }
2634 
2635  const TreeEntry *getTreeEntry(Value *V) const {
2636  return ScalarToTreeEntry.lookup(V);
2637  }
2638 
2639  /// Maps a specific scalar to its tree entry.
2640  SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry;
2641 
2642  /// Maps a value to the proposed vectorizable size.
2643  SmallDenseMap<Value *, unsigned> InstrElementSize;
2644 
2645  /// A list of scalars that we found that we need to keep as scalars.
2646  ValueSet MustGather;
2647 
2648  /// This POD struct describes one external user in the vectorized tree.
2649  struct ExternalUser {
2650  ExternalUser(Value *S, llvm::User *U, int L)
2651  : Scalar(S), User(U), Lane(L) {}
2652 
2653  // Which scalar in our function.
2654  Value *Scalar;
2655 
2656  // Which user that uses the scalar.
2657  llvm::User *User;
2658 
2659  // Which lane does the scalar belong to.
2660  int Lane;
2661  };
2662  using UserList = SmallVector<ExternalUser, 16>;
2663 
2664  /// Checks if two instructions may access the same memory.
2665  ///
2666  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
2667  /// is invariant in the calling loop.
2668  bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
2669  Instruction *Inst2) {
2670  // First check if the result is already in the cache.
2671  AliasCacheKey key = std::make_pair(Inst1, Inst2);
2672  Optional<bool> &result = AliasCache[key];
2673  if (result) {
2674  return result.value();
2675  }
2676  bool aliased = true;
2677  if (Loc1.Ptr && isSimple(Inst1))
2678  aliased = isModOrRefSet(BatchAA.getModRefInfo(Inst2, Loc1));
2679  // Store the result in the cache.
2680  result = aliased;
2681  return aliased;
2682  }
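  // Note: repeated queries for the same (Inst1, Inst2) pair, which are common
  // when dependencies are computed lane by lane, are answered from AliasCache
  // after a single BatchAA query.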
2683 
2684  using AliasCacheKey = std::pair<Instruction *, Instruction *>;
2685 
2686  /// Cache for alias results.
2687  /// TODO: consider moving this to the AliasAnalysis itself.
2688  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
2689 
2690  // Cache for pointerMayBeCaptured calls inside AA. This is preserved
2691  // globally through SLP because we don't perform any action which
2692  // invalidates capture results.
2693  BatchAAResults BatchAA;
2694 
2695  /// Temporary store for deleted instructions. Instructions will be deleted
2696  /// eventually when the BoUpSLP is destructed. The deferral is required to
2697  /// ensure that there are no incorrect collisions in the AliasCache, which
2698  /// can happen if a new instruction is allocated at the same address as a
2699  /// previously deleted instruction.
2700  DenseSet<Instruction *> DeletedInstructions;
2701 
2702  /// Set of the instruction, being analyzed already for reductions.
2703  SmallPtrSet<Instruction *, 16> AnalyzedReductionsRoots;
2704 
2705  /// Set of hashes for the list of reduction values already being analyzed.
2706  DenseSet<size_t> AnalyzedReductionVals;
2707 
2708  /// A list of values that need to extracted out of the tree.
2709  /// This list holds pairs of (Internal Scalar : External User). External User
2710  /// can be nullptr, it means that this Internal Scalar will be used later,
2711  /// after vectorization.
2712  UserList ExternalUses;
2713 
2714  /// Values used only by @llvm.assume calls.
2715  SmallPtrSet<const Value *, 32> EphValues;
2716 
2717  /// Holds all of the instructions that we gathered.
2718  SetVector<Instruction *> GatherShuffleSeq;
2719 
2720  /// A list of blocks that we are going to CSE.
2721  SetVector<BasicBlock *> CSEBlocks;
2722 
2723  /// Contains all scheduling relevant data for an instruction.
2724  /// A ScheduleData either represents a single instruction or a member of an
2725  /// instruction bundle (= a group of instructions which is combined into a
2726  /// vector instruction).
2727  struct ScheduleData {
2728  // The initial value for the dependency counters. It means that the
2729  // dependencies are not calculated yet.
2730  enum { InvalidDeps = -1 };
2731 
2732  ScheduleData() = default;
2733 
2734  void init(int BlockSchedulingRegionID, Value *OpVal) {
2735  FirstInBundle = this;
2736  NextInBundle = nullptr;
2737  NextLoadStore = nullptr;
2738  IsScheduled = false;
2739  SchedulingRegionID = BlockSchedulingRegionID;
2740  clearDependencies();
2741  OpValue = OpVal;
2742  TE = nullptr;
2743  }
2744 
2745  /// Verify basic self consistency properties
2746  void verify() {
2747  if (hasValidDependencies()) {
2748  assert(UnscheduledDeps <= Dependencies && "invariant");
2749  } else {
2750  assert(UnscheduledDeps == Dependencies && "invariant");
2751  }
2752 
2753  if (IsScheduled) {
2754  assert(isSchedulingEntity() &&
2755  "unexpected scheduled state");
2756  for (const ScheduleData *BundleMember = this; BundleMember;
2757  BundleMember = BundleMember->NextInBundle) {
2758  assert(BundleMember->hasValidDependencies() &&
2759  BundleMember->UnscheduledDeps == 0 &&
2760  "unexpected scheduled state");
2761  assert((BundleMember == this || !BundleMember->IsScheduled) &&
2762  "only bundle is marked scheduled");
2763  }
2764  }
2765 
2766  assert(Inst->getParent() == FirstInBundle->Inst->getParent() &&
2767  "all bundle members must be in same basic block");
2768  }
2769 
2770  /// Returns true if the dependency information has been calculated.
2771  /// Note that dependency validity can vary between instructions within
2772  /// a single bundle.
2773  bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
2774 
2775  /// Returns true for single instructions and for bundle representatives
2776  /// (= the head of a bundle).
2777  bool isSchedulingEntity() const { return FirstInBundle == this; }
2778 
2779  /// Returns true if it represents an instruction bundle and not only a
2780  /// single instruction.
2781  bool isPartOfBundle() const {
2782  return NextInBundle != nullptr || FirstInBundle != this || TE;
2783  }
2784 
2785  /// Returns true if it is ready for scheduling, i.e. it has no more
2786  /// unscheduled depending instructions/bundles.
2787  bool isReady() const {
2788  assert(isSchedulingEntity() &&
2789  "can't consider non-scheduling entity for ready list");
2790  return unscheduledDepsInBundle() == 0 && !IsScheduled;
2791  }
2792 
2793  /// Modifies the number of unscheduled dependencies for this instruction,
2794  /// and returns the number of remaining dependencies for the containing
2795  /// bundle.
2796  int incrementUnscheduledDeps(int Incr) {
2797  assert(hasValidDependencies() &&
2798  "increment of unscheduled deps would be meaningless");
2799  UnscheduledDeps += Incr;
2800  return FirstInBundle->unscheduledDepsInBundle();
2801  }
2802 
2803  /// Sets the number of unscheduled dependencies to the number of
2804  /// dependencies.
2805  void resetUnscheduledDeps() {
2806  UnscheduledDeps = Dependencies;
2807  }
2808 
2809  /// Clears all dependency information.
2810  void clearDependencies() {
2811  Dependencies = InvalidDeps;
2812  resetUnscheduledDeps();
2813  MemoryDependencies.clear();
2814  ControlDependencies.clear();
2815  }
2816 
2817  int unscheduledDepsInBundle() const {
2818  assert(isSchedulingEntity() && "only meaningful on the bundle");
2819  int Sum = 0;
2820  for (const ScheduleData *BundleMember = this; BundleMember;
2821  BundleMember = BundleMember->NextInBundle) {
2822  if (BundleMember->UnscheduledDeps == InvalidDeps)
2823  return InvalidDeps;
2824  Sum += BundleMember->UnscheduledDeps;
2825  }
2826  return Sum;
2827  }
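      // E.g., a two-instruction bundle whose members still wait on 1 and 2
      // dependencies respectively reports 3; the bundle becomes ready for
      // scheduling only once this sum drops to 0 (see isReady()).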
2828 
2829  void dump(raw_ostream &os) const {
2830  if (!isSchedulingEntity()) {
2831  os << "/ " << *Inst;
2832  } else if (NextInBundle) {
2833  os << '[' << *Inst;
2834  ScheduleData *SD = NextInBundle;
2835  while (SD) {
2836  os << ';' << *SD->Inst;
2837  SD = SD->NextInBundle;
2838  }
2839  os << ']';
2840  } else {
2841  os << *Inst;
2842  }
2843  }
2844 
2845  Instruction *Inst = nullptr;
2846 
2847  /// Opcode of the current instruction in the schedule data.
2848  Value *OpValue = nullptr;
2849 
2850  /// The TreeEntry that this instruction corresponds to.
2851  TreeEntry *TE = nullptr;
2852 
2853  /// Points to the head in an instruction bundle (and always to this for
2854  /// single instructions).
2855  ScheduleData *FirstInBundle = nullptr;
2856 
2857  /// Singly linked list of all instructions in a bundle. Null if it is a
2858  /// single instruction.
2859  ScheduleData *NextInBundle = nullptr;
2860 
2861  /// Singly linked list of all memory instructions (e.g. load, store, call)
2862  /// in the block - until the end of the scheduling region.
2863  ScheduleData *NextLoadStore = nullptr;
2864 
2865  /// The dependent memory instructions.
2866  /// This list is derived on demand in calculateDependencies().
2867  SmallVector<ScheduleData *, 4> MemoryDependencies;
2868 
2869  /// List of instructions which this instruction could be control dependent
2870  /// on. Allowing such nodes to be scheduled below this one could introduce
2871  /// a runtime fault which didn't exist in the original program.
2872  /// e.g., this is a load or udiv following a readonly call which infinitely loops.
2873  SmallVector<ScheduleData *, 4> ControlDependencies;
2874 
2875  /// This ScheduleData is in the current scheduling region if this matches
2876  /// the current SchedulingRegionID of BlockScheduling.
2877  int SchedulingRegionID = 0;
2878 
2879  /// Used for getting a "good" final ordering of instructions.
2880  int SchedulingPriority = 0;
2881 
2882  /// The number of dependencies. Consists of the number of users of the
2883  /// instruction plus the number of dependent memory instructions (if any).
2884  /// This value is calculated on demand.
2885  /// If InvalidDeps, the number of dependencies is not calculated yet.
2886  int Dependencies = InvalidDeps;
2887 
2888  /// The number of dependencies minus the number of dependencies of scheduled
2889  /// instructions. As soon as this is zero, the instruction/bundle gets ready
2890  /// for scheduling.
2891  /// Note that this is negative as long as Dependencies is not calculated.
2892  int UnscheduledDeps = InvalidDeps;
2893 
2894  /// True if this instruction is scheduled (or considered as scheduled in the
2895  /// dry-run).
2896  bool IsScheduled = false;
2897  };
2898 
2899 #ifndef NDEBUG
2900  friend inline raw_ostream &operator<<(raw_ostream &os,
2901  const BoUpSLP::ScheduleData &SD) {
2902  SD.dump(os);
2903  return os;
2904  }
2905 #endif
2906 
2907  friend struct GraphTraits<BoUpSLP *>;
2908  friend struct DOTGraphTraits<BoUpSLP *>;
2909 
2910  /// Contains all scheduling data for a basic block.
2911  /// It does not schedule instructions that are not memory read/write
2912  /// instructions and whose operands are either constants, arguments, phis,
2913  /// or instructions from other blocks, or whose users are phis or belong to
2914  /// other blocks. The resulting vector instructions can be placed at the
2915  /// beginning of the basic block without scheduling (if their operands do
2916  /// not need to be scheduled) or at the end of the block (if their users are
2917  /// outside of the block). This saves some of the compile time and memory
2918  /// used by the compiler.
2919  /// ScheduleData is assigned to each instruction between the boundaries of
2920  /// the tree entry, even to those which are not part of the graph. This is
2921  /// required to correctly follow the dependencies between the instructions
2922  /// and to schedule them correctly. ScheduleData is not allocated for
2923  /// instructions which do not require scheduling, like phis, nodes with only
2924  /// extractelements/insertelements, or nodes whose instructions have
2925  /// uses/operands outside of the block.
2926  struct BlockScheduling {
2927  BlockScheduling(BasicBlock *BB)
2928  : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
2929 
2930  void clear() {
2931  ReadyInsts.clear();
2932  ScheduleStart = nullptr;
2933  ScheduleEnd = nullptr;
2934  FirstLoadStoreInRegion = nullptr;
2935  LastLoadStoreInRegion = nullptr;
2936  RegionHasStackSave = false;
2937 
2938  // Reduce the maximum schedule region size by the size of the
2939  // previous scheduling run.
2940  ScheduleRegionSizeLimit -= ScheduleRegionSize;
2941  if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
2942  ScheduleRegionSizeLimit = MinScheduleRegionSize;
2943  ScheduleRegionSize = 0;
2944 
2945  // Make a new scheduling region, i.e. all existing ScheduleData is not
2946  // in the new region yet.
2947  ++SchedulingRegionID;
2948  }
2949 
2950  ScheduleData *getScheduleData(Instruction *I) {
2951  if (BB != I->getParent())
2952  // Avoid lookup if can't possibly be in map.
2953  return nullptr;
2954  ScheduleData *SD = ScheduleDataMap.lookup(I);
2955  if (SD && isInSchedulingRegion(SD))
2956  return SD;
2957  return nullptr;
2958  }
2959 
2960  ScheduleData *getScheduleData(Value *V) {
2961  if (auto *I = dyn_cast<Instruction>(V))
2962  return getScheduleData(I);
2963  return nullptr;
2964  }
2965 
2966  ScheduleData *getScheduleData(Value *V, Value *Key) {
2967  if (V == Key)
2968  return getScheduleData(V);
2969  auto I = ExtraScheduleDataMap.find(V);
2970  if (I != ExtraScheduleDataMap.end()) {
2971  ScheduleData *SD = I->second.lookup(Key);
2972  if (SD && isInSchedulingRegion(SD))
2973  return SD;
2974  }
2975  return nullptr;
2976  }
2977 
2978  bool isInSchedulingRegion(ScheduleData *SD) const {
2979  return SD->SchedulingRegionID == SchedulingRegionID;
2980  }
2981 
2982  /// Marks an instruction as scheduled and puts all dependent ready
2983  /// instructions into the ready-list.
2984  template <typename ReadyListType>
2985  void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
2986  SD->IsScheduled = true;
2987  LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
2988 
2989  for (ScheduleData *BundleMember = SD; BundleMember;
2990  BundleMember = BundleMember->NextInBundle) {
2991  if (BundleMember->Inst != BundleMember->OpValue)
2992  continue;
2993 
2994  // Handle the def-use chain dependencies.
2995 
2996  // Decrement the unscheduled counter and insert to ready list if ready.
2997  auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
2998  doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
2999  if (OpDef && OpDef->hasValidDependencies() &&
3000  OpDef->incrementUnscheduledDeps(-1) == 0) {
3001  // There are no more unscheduled dependencies after
3002  // decrementing, so we can put the dependent instruction
3003  // into the ready list.
3004  ScheduleData *DepBundle = OpDef->FirstInBundle;
3005  assert(!DepBundle->IsScheduled &&
3006  "already scheduled bundle gets ready");
3007  ReadyList.insert(DepBundle);
3008  LLVM_DEBUG(dbgs()
3009  << "SLP: gets ready (def): " << *DepBundle << "\n");
3010  }
3011  });
3012  };
3013 
3014  // If BundleMember is a vector bundle, its operands may have been
3015  // reordered during buildTree(). We therefore need to get its operands
3016  // through the TreeEntry.
3017  if (TreeEntry *TE = BundleMember->TE) {
3018  // Need to search for the lane since the tree entry can be reordered.
3019  int Lane = std::distance(TE->Scalars.begin(),
3020  find(TE->Scalars, BundleMember->Inst));
3021  assert(Lane >= 0 && "Lane not set");
3022 
3023  // Since vectorization tree is being built recursively this assertion
3024  // ensures that the tree entry has all operands set before reaching
3025  // this code. Couple of exceptions known at the moment are extracts
3026  // where their second (immediate) operand is not added. Since
3027  // immediates do not affect scheduler behavior this is considered
3028  // okay.
3029  auto *In = BundleMember->Inst;
3030  assert(In &&
3031  (isa<ExtractValueInst, ExtractElementInst>(In) ||
3032  In->getNumOperands() == TE->getNumOperands()) &&
3033  "Missed TreeEntry operands?");
3034  (void)In; // fake use to avoid build failure when assertions disabled
3035 
3036  for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
3037  OpIdx != NumOperands; ++OpIdx)
3038  if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
3039  DecrUnsched(I);
3040  } else {
3041  // If BundleMember is a stand-alone instruction, no operand reordering
3042  // has taken place, so we directly access its operands.
3043  for (Use &U : BundleMember->Inst->operands())
3044  if (auto *I = dyn_cast<Instruction>(U.get()))
3045  DecrUnsched(I);
3046  }
3047  // Handle the memory dependencies.
3048  for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
3049  if (MemoryDepSD->hasValidDependencies() &&
3050  MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
3051  // There are no more unscheduled dependencies after decrementing,
3052  // so we can put the dependent instruction into the ready list.
3053  ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
3054  assert(!DepBundle->IsScheduled &&
3055  "already scheduled bundle gets ready");
3056  ReadyList.insert(DepBundle);
3057  LLVM_DEBUG(dbgs()
3058  << "SLP: gets ready (mem): " << *DepBundle << "\n");
3059  }
3060  }
3061  // Handle the control dependencies.
3062  for (ScheduleData *DepSD : BundleMember->ControlDependencies) {
3063  if (DepSD->incrementUnscheduledDeps(-1) == 0) {
3064  // There are no more unscheduled dependencies after decrementing,
3065  // so we can put the dependent instruction into the ready list.
3066  ScheduleData *DepBundle = DepSD->FirstInBundle;
3067  assert(!DepBundle->IsScheduled &&
3068  "already scheduled bundle gets ready");
3069  ReadyList.insert(DepBundle);
3070  LLVM_DEBUG(dbgs()
3071  << "SLP: gets ready (ctl): " << *DepBundle << "\n");
3072  }
3073  }
3074 
3075  }
3076  }
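// E.g., scheduling a bundle [%x, %y] walks both members and decrements the
// unscheduled-dependency counters of the ScheduleData of each operand
// definition (looked up through the TreeEntry, since operands may have been
// reordered), of each recorded memory dependence, and of each control
// dependence; every bundle whose counter hits 0 is inserted into ReadyList.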
3077 
3078  /// Verify basic self consistency properties of the data structure.
3079  void verify() {
3080  if (!ScheduleStart)
3081  return;
3082 
3083  assert(ScheduleStart->getParent() == ScheduleEnd->getParent() &&
3084  ScheduleStart->comesBefore(ScheduleEnd) &&
3085  "Not a valid scheduling region?");
3086 
3087  for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3088  auto *SD = getScheduleData(I);
3089  if (!SD)
3090  continue;
3091  assert(isInSchedulingRegion(SD) &&
3092  "primary schedule data not in window?");
3093  assert(isInSchedulingRegion(SD->FirstInBundle) &&
3094  "entire bundle in window!");
3095  (void)SD;
3096  doForAllOpcodes(I, [](ScheduleData *SD) { SD->verify(); });
3097  }
3098 
3099  for (auto *SD : ReadyInsts) {
3100  assert(SD->isSchedulingEntity() && SD->isReady() &&
3101  "item in ready list not ready?");
3102  (void)SD;
3103  }
3104  }
3105 
3106  void doForAllOpcodes(Value *V,
3107  function_ref<void(ScheduleData *SD)> Action) {
3108  if (ScheduleData *SD = getScheduleData(V))
3109  Action(SD);
3110  auto I = ExtraScheduleDataMap.find(V);
3111  if (I != ExtraScheduleDataMap.end())
3112  for (auto &P : I->second)
3113  if (isInSchedulingRegion(P.second))
3114  Action(P.second);
3115  }
3116 
3117  /// Put all instructions into the ReadyList which are ready for scheduling.
3118  template <typename ReadyListType>
3119  void initialFillReadyList(ReadyListType &ReadyList) {
3120  for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3121  doForAllOpcodes(I, [&](ScheduleData *SD) {
3122  if (SD->isSchedulingEntity() && SD->hasValidDependencies() &&
3123  SD->isReady()) {
3124  ReadyList.insert(SD);
3125  LLVM_DEBUG(dbgs()
3126  << "SLP: initially in ready list: " << *SD << "\n");
3127  }
3128  });
3129  }
3130  }
3131 
3132  /// Build a bundle from the ScheduleData nodes corresponding to the
3133  /// scalar instruction for each lane.
3134  ScheduleData *buildBundle(ArrayRef<Value *> VL);
3135 
3136  /// Checks if a bundle of instructions can be scheduled, i.e. has no
3137  /// cyclic dependencies. This is only a dry-run, no instructions are
3138  /// actually moved at this stage.
3139  /// \returns the scheduling bundle. The returned Optional value is non-None
3140  /// if \p VL is allowed to be scheduled.
3141  Optional<ScheduleData *>
3142  tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
3143  const InstructionsState &S);
3144 
3145  /// Un-bundles a group of instructions.
3146  void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
3147 
3148  /// Allocates schedule data chunk.
3149  ScheduleData *allocateScheduleDataChunks();
3150 
3151  /// Extends the scheduling region so that V is inside the region.
3152  /// \returns true if the region size is within the limit.
3153  bool extendSchedulingRegion(Value *V, const InstructionsState &S);
3154 
3155  /// Initialize the ScheduleData structures for new instructions in the
3156  /// scheduling region.
3157  void initScheduleData(Instruction *FromI, Instruction *ToI,
3158  ScheduleData *PrevLoadStore,
3159  ScheduleData *NextLoadStore);
3160 
3161  /// Updates the dependency information of a bundle and of all instructions/
3162  /// bundles which depend on the original bundle.
3163  void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
3164  BoUpSLP *SLP);
3165 
3166  /// Sets all instructions in the scheduling region to un-scheduled.
3167  void resetSchedule();
3168 
3169  BasicBlock *BB;
3170 
3171  /// Simple memory allocation for ScheduleData.
3172  std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
3173 
3174  /// The size of a ScheduleData array in ScheduleDataChunks.
3175  int ChunkSize;
3176 
3177  /// The allocator position in the current chunk, which is the last entry
3178  /// of ScheduleDataChunks.
3179  int ChunkPos;
3180 
3181  /// Attaches ScheduleData to Instruction.
3182  /// Note that the mapping survives during all vectorization iterations, i.e.
3183  /// ScheduleData structures are recycled.
3184  DenseMap<Instruction *, ScheduleData *> ScheduleDataMap;
3185 
3186  /// Attaches ScheduleData to Instruction with the leading key.
3187  DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
3188  ExtraScheduleDataMap;
3189 
3190  /// The ready-list for scheduling (only used for the dry-run).
3191  SetVector<ScheduleData *> ReadyInsts;
3192 
3193  /// The first instruction of the scheduling region.
3194  Instruction *ScheduleStart = nullptr;
3195 
3196  /// The first instruction _after_ the scheduling region.
3197  Instruction *ScheduleEnd = nullptr;
3198 
3199  /// The first memory accessing instruction in the scheduling region
3200  /// (can be null).
3201  ScheduleData *FirstLoadStoreInRegion = nullptr;
3202 
3203  /// The last memory accessing instruction in the scheduling region
3204  /// (can be null).
3205  ScheduleData *LastLoadStoreInRegion = nullptr;
3206 
3207  /// Is there an llvm.stacksave or llvm.stackrestore in the scheduling
3208  /// region? Used to optimize the dependence calculation for the
3209  /// common case where there isn't.
3210  bool RegionHasStackSave = false;
3211 
3212  /// The current size of the scheduling region.
3213  int ScheduleRegionSize = 0;
3214 
3215  /// The maximum size allowed for the scheduling region.
3216  int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;
3217 
3218  /// The ID of the scheduling region. For a new vectorization iteration this
3219  /// is incremented, which "removes" all ScheduleData from the region.
3220  /// Make sure that the initial SchedulingRegionID is greater than the
3221  /// initial SchedulingRegionID in ScheduleData (which is 0).
3222  int SchedulingRegionID = 1;
3223  };
3224 
3225  /// Attaches the BlockScheduling structures to basic blocks.
3226  MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
3227 
3228  /// Performs the "real" scheduling. Done before vectorization is actually
3229  /// performed in a basic block.
3230  void scheduleBlock(BlockScheduling *BS);
3231 
3232  /// List of users to ignore during scheduling and that don't need extracting.
3233  const SmallDenseSet<Value *> *UserIgnoreList = nullptr;
3234 
3235  /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
3236  /// sorted SmallVectors of unsigned.
3237  struct OrdersTypeDenseMapInfo {
3238  static OrdersType getEmptyKey() {
3239  OrdersType V;
3240  V.push_back(~1U);
3241  return V;
3242  }
3243 
3244  static OrdersType getTombstoneKey() {
3245  OrdersType V;
3246  V.push_back(~2U);
3247  return V;
3248  }
3249 
3250  static unsigned getHashValue(const OrdersType &V) {
3251  return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
3252  }
3253 
3254  static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) {
3255  return LHS == RHS;
3256  }
3257  };
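// A minimal usage sketch: this traits struct lets OrdersType act as a
// DenseMap key, e.g.
//   MapVector<OrdersType, unsigned,
//             DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> OrdersUses;
// which is exactly how the reordering code below counts how many times each
// order is requested.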
3258 
3259  // Analysis and block reference.
3260  Function *F;
3261  ScalarEvolution *SE;
3262  TargetTransformInfo *TTI;
3263  TargetLibraryInfo *TLI;
3264  LoopInfo *LI;
3265  DominatorTree *DT;
3266  AssumptionCache *AC;
3267  DemandedBits *DB;
3268  const DataLayout *DL;
3269  OptimizationRemarkEmitter *ORE;
3270 
3271  unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
3272  unsigned MinVecRegSize; // Set by cl::opt (default: 128).
3273 
3274  /// Instruction builder to construct the vectorized tree.
3275  IRBuilder<> Builder;
3276 
3277  /// A map of scalar integer values to the smallest bit width with which they
3278  /// can legally be represented. The values map to (width, signed) pairs,
3279  /// where "width" indicates the minimum bit width and "signed" is True if the
3280  /// value must be signed-extended, rather than zero-extended, back to its
3281  /// original width.
3282  MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
3283 };
3284 
3285 } // end namespace slpvectorizer
3286 
3287 template <> struct GraphTraits<BoUpSLP *> {
3288  using TreeEntry = BoUpSLP::TreeEntry;
3289 
3290  /// NodeRef has to be a pointer per the GraphWriter.
3291  using NodeRef = TreeEntry *;
3292 
3293  using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy;
3294 
3295  /// Add the VectorizableTree to the index iterator to be able to return
3296  /// TreeEntry pointers.
3297  struct ChildIteratorType
3298  : public iterator_adaptor_base<
3299  ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
3300  ContainerTy &VectorizableTree;
3301 
3302  ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
3303  ContainerTy &VT)
3304  : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}
3305 
3306  NodeRef operator*() { return I->UserTE; }
3307  };
3308 
3309  static NodeRef getEntryNode(BoUpSLP &R) {
3310  return R.VectorizableTree[0].get();
3311  }
3312 
3313  static ChildIteratorType child_begin(NodeRef N) {
3314  return {N->UserTreeIndices.begin(), N->Container};
3315  }
3316 
3317  static ChildIteratorType child_end(NodeRef N) {
3318  return {N->UserTreeIndices.end(), N->Container};
3319  }
3320 
3321  /// For the node iterator we just need to turn the TreeEntry iterator into a
3322  /// TreeEntry* iterator so that it dereferences to NodeRef.
3323  class nodes_iterator {
3324  using ItTy = ContainerTy::iterator;
3325  ItTy It;
3326 
3327  public:
3328  nodes_iterator(const ItTy &It2) : It(It2) {}
3329  NodeRef operator*() { return It->get(); }
3330  nodes_iterator operator++() {
3331  ++It;
3332  return *this;
3333  }
3334  bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
3335  };
3336 
3337  static nodes_iterator nodes_begin(BoUpSLP *R) {
3338  return nodes_iterator(R->VectorizableTree.begin());
3339  }
3340 
3341  static nodes_iterator nodes_end(BoUpSLP *R) {
3342  return nodes_iterator(R->VectorizableTree.end());
3343  }
3344 
3345  static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
3346 };
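// With this GraphTraits specialization generic graph utilities can walk the
// SLP graph, e.g. GraphWriter-based dumps; the DOTGraphTraits specialization
// below builds on it to label every TreeEntry node with its scalars.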
3347 
3348 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
3349  using TreeEntry = BoUpSLP::TreeEntry;
3350 
3351  DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
3352 
3353  std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
3354  std::string Str;
3355  raw_string_ostream OS(Str);
3356  if (isSplat(Entry->Scalars))
3357  OS << "<splat> ";
3358  for (auto *V : Entry->Scalars) {
3359  OS << *V;
3360  if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) {
3361  return EU.Scalar == V;
3362  }))
3363  OS << " <extract>";
3364  OS << "\n";
3365  }
3366  return Str;
3367  }
3368 
3369  static std::string getNodeAttributes(const TreeEntry *Entry,
3370  const BoUpSLP *) {
3371  if (Entry->State == TreeEntry::NeedToGather)
3372  return "color=red";
3373  return "";
3374  }
3375 };
3376 
3377 } // end namespace llvm
3378 
3379 BoUpSLP::~BoUpSLP() {
3380  SmallVector<WeakTrackingVH> DeadInsts;
3381  for (auto *I : DeletedInstructions) {
3382  for (Use &U : I->operands()) {
3383  auto *Op = dyn_cast<Instruction>(U.get());
3384  if (Op && !DeletedInstructions.count(Op) && Op->hasOneUser() &&
3385  wouldInstructionBeTriviallyDead(Op, TLI))
3386  DeadInsts.emplace_back(Op);
3387  }
3388  I->dropAllReferences();
3389  }
3390  for (auto *I : DeletedInstructions) {
3391  assert(I->use_empty() &&
3392  "trying to erase instruction with users.");
3393  I->eraseFromParent();
3394  }
3395 
3396  // Cleanup any dead scalar code feeding the vectorized instructions
3397  RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI);
3398 
3399 #ifdef EXPENSIVE_CHECKS
3400  // If we could guarantee that this call is not extremely slow, we could
3401  // remove the ifdef limitation (see PR47712).
3402  assert(!verifyFunction(*F, &dbgs()));
3403 #endif
3404 }
3405 
3406 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses
3407 /// contains the original mask for the scalars reused in the node. The
3408 /// procedure transforms this mask in accordance with the given \p Mask.
3409 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) {
3410  assert(!Mask.empty() && Reuses.size() == Mask.size() &&
3411  "Expected non-empty mask.");
3412  SmallVector<int> Prev(Reuses.begin(), Reuses.end());
3413  Prev.swap(Reuses);
3414  for (unsigned I = 0, E = Prev.size(); I < E; ++I)
3415  if (Mask[I] != UndefMaskElem)
3416  Reuses[Mask[I]] = Prev[I];
3417 }
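// E.g., for Reuses = {0, 1, 2, 3} and Mask = {3, 2, 1, 0}: each previous
// element Prev[I] moves to position Mask[I], yielding Reuses = {3, 2, 1, 0}.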
3418 
3419 /// Reorders the given \p Order according to the given \p Mask. \p Order is
3420 /// the original order of the scalars. The procedure transforms the provided order
3421 /// in accordance with the given \p Mask. If the resulting \p Order is just an
3422 /// identity order, \p Order is cleared.
3423 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) {
3424  assert(!Mask.empty() && "Expected non-empty mask.");
3425  SmallVector<int> MaskOrder;
3426  if (Order.empty()) {
3427  MaskOrder.resize(Mask.size());
3428  std::iota(MaskOrder.begin(), MaskOrder.end(), 0);
3429  } else {
3430  inversePermutation(Order, MaskOrder);
3431  }
3432  reorderReuses(MaskOrder, Mask);
3433  if (ShuffleVectorInst::isIdentityMask(MaskOrder)) {
3434  Order.clear();
3435  return;
3436  }
3437  Order.assign(Mask.size(), Mask.size());
3438  for (unsigned I = 0, E = Mask.size(); I < E; ++I)
3439  if (MaskOrder[I] != UndefMaskElem)
3440  Order[MaskOrder[I]] = I;
3441  fixupOrderingIndices(Order);
3442 }
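// E.g., an empty (identity) Order combined with Mask = {1, 0, 3, 2} produces
// Order = {1, 0, 3, 2}; conversely, a mask that restores the identity makes
// the function clear \p Order entirely.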
3443 
3444 Optional<BoUpSLP::OrdersType>
3445 BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
3446  assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
3447  unsigned NumScalars = TE.Scalars.size();
3448  OrdersType CurrentOrder(NumScalars, NumScalars);
3449  SmallVector<int> Positions;
3450  SmallBitVector UsedPositions(NumScalars);
3451  const TreeEntry *STE = nullptr;
3452  // Try to find all gathered scalars that get vectorized in another
3453  // vectorized node. Here we can have only a single tree vector node, so
3454  // that the order of the gathered scalars is identified correctly.
3455  for (unsigned I = 0; I < NumScalars; ++I) {
3456  Value *V = TE.Scalars[I];
3457  if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V))
3458  continue;
3459  if (const auto *LocalSTE = getTreeEntry(V)) {
3460  if (!STE)
3461  STE = LocalSTE;
3462  else if (STE != LocalSTE)
3463  // Take the order only from the single vector node.
3464  return None;
3465  unsigned Lane =
3466  std::distance(STE->Scalars.begin(), find(STE->Scalars, V));
3467  if (Lane >= NumScalars)
3468  return None;
3469  if (CurrentOrder[Lane] != NumScalars) {
3470  if (Lane != I)
3471  continue;
3472  UsedPositions.reset(CurrentOrder[Lane]);
3473  }
3474  // The partial identity (where only some elements of the gather node are
3475  // in the identity order) is good.
3476  CurrentOrder[Lane] = I;
3477  UsedPositions.set(I);
3478  }
3479  }
3480  // Need to keep the order if we have a vector entry and at least 2 scalars or
3481  // the vectorized entry has just 2 scalars.
3482  if (STE && (UsedPositions.count() > 1 || STE->Scalars.size() == 2)) {
3483  auto &&IsIdentityOrder = [NumScalars](ArrayRef<unsigned> CurrentOrder) {
3484  for (unsigned I = 0; I < NumScalars; ++I)
3485  if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars)
3486  return false;
3487  return true;
3488  };
3489  if (IsIdentityOrder(CurrentOrder)) {
3490  CurrentOrder.clear();
3491  return CurrentOrder;
3492  }
3493  auto *It = CurrentOrder.begin();
3494  for (unsigned I = 0; I < NumScalars;) {
3495  if (UsedPositions.test(I)) {
3496  ++I;
3497  continue;
3498  }
3499  if (*It == NumScalars) {
3500  *It = I;
3501  ++I;
3502  }
3503  ++It;
3504  }
3505  return CurrentOrder;
3506  }
3507  return None;
3508 }
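// E.g., if a gather node holds {%a, %b} and both scalars are vectorized by a
// single tree entry as {%b, %a}, the function returns the order {1, 0}: the
// gather is just a reshuffle of that vectorized node.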
3509 
3510 namespace {
3511 /// Tracks the state we can represent the loads in the given sequence.
3512 enum class LoadsState { Gather, Vectorize, ScatterVectorize };
3513 } // anonymous namespace
3514 
3515 /// Checks if the given array of loads can be represented as a vectorized,
3516 /// scatter or just simple gather.
3517 static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0,
3518  const TargetTransformInfo &TTI,
3519  const DataLayout &DL, ScalarEvolution &SE,
3520  LoopInfo &LI,
3521  SmallVectorImpl<unsigned> &Order,
3522  SmallVectorImpl<Value *> &PointerOps) {
3523  // Check that a vectorized load would load the same memory as a scalar
3524  // load. For example, we don't want to vectorize loads that are smaller
3525  // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>}, LLVM
3526  // treats loading/storing it as an i8 struct. If we vectorize loads/stores
3527  // from such a struct, we read/write packed bits disagreeing with the
3528  // unvectorized version.
3529  Type *ScalarTy = VL0->getType();
3530 
3531  if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy))
3532  return LoadsState::Gather;
3533 
3534  // Make sure all loads in the bundle are simple - we can't vectorize
3535  // atomic or volatile loads.
3536  PointerOps.clear();
3537  PointerOps.resize(VL.size());
3538  auto *POIter = PointerOps.begin();
3539  for (Value *V : VL) {
3540  auto *L = cast<LoadInst>(V);
3541  if (!L->isSimple())
3542  return LoadsState::Gather;
3543  *POIter = L->getPointerOperand();
3544  ++POIter;
3545  }
3546 
3547  Order.clear();
3548  // Check the order of pointer operands or that all pointers are the same.
3549  bool IsSorted = sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order);
3550  if (IsSorted || all_of(PointerOps, [&PointerOps](Value *P) {
3551  if (getUnderlyingObject(P) != getUnderlyingObject(PointerOps.front()))
3552  return false;
3553  auto *GEP = dyn_cast<GetElementPtrInst>(P);
3554  if (!GEP)
3555  return false;
3556  auto *GEP0 = cast<GetElementPtrInst>(PointerOps.front());
3557  return GEP->getNumOperands() == 2 &&
3558  ((isConstant(GEP->getOperand(1)) &&
3559  isConstant(GEP0->getOperand(1))) ||
3560  getSameOpcode({GEP->getOperand(1), GEP0->getOperand(1)})
3561  .getOpcode());
3562  })) {
3563  if (IsSorted) {
3564  Value *Ptr0;
3565  Value *PtrN;
3566  if (Order.empty()) {
3567  Ptr0 = PointerOps.front();
3568  PtrN = PointerOps.back();
3569  } else {
3570  Ptr0 = PointerOps[Order.front()];
3571  PtrN = PointerOps[Order.back()];
3572  }
3573  Optional<int> Diff =
3574  getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE);
3575  // Check that the sorted loads are consecutive.
3576  if (static_cast<unsigned>(*Diff) == VL.size() - 1)
3577  return LoadsState::Vectorize;
3578  }
3579  // TODO: need to improve analysis of the pointers, if not all of them are
3580  // GEPs or have > 2 operands, we end up with a gather node, which just
3581  // increases the cost.
3582  Loop *L = LI.getLoopFor(cast<LoadInst>(VL0)->getParent());
3583  bool ProfitableGatherPointers =
3584  static_cast<unsigned>(count_if(PointerOps, [L](Value *V) {
3585  return L && L->isLoopInvariant(V);
3586  })) <= VL.size() / 2 && VL.size() > 2;
3587  if (ProfitableGatherPointers || all_of(PointerOps, [IsSorted](Value *P) {
3588  auto *GEP = dyn_cast<GetElementPtrInst>(P);
3589  return (IsSorted && !GEP && doesNotNeedToBeScheduled(P)) ||
3590  (GEP && GEP->getNumOperands() == 2);
3591  })) {
3592  Align CommonAlignment = cast<LoadInst>(VL0)->getAlign();
3593  for (Value *V : VL)
3594  CommonAlignment =
3595  std::min(CommonAlignment, cast<LoadInst>(V)->getAlign());
3596  auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
3597  if (TTI.isLegalMaskedGather(VecTy, CommonAlignment) &&
3598  !TTI.forceScalarizeMaskedGather(VecTy, CommonAlignment))
3599  return LoadsState::ScatterVectorize;
3600  }
3601  }
3602 
3603  return LoadsState::Gather;
3604 }
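// Illustrative outcomes (assuming simple unit-stride i32 loads): a bundle
// loading a[0], a[1], a[2], a[3] has consecutive sorted pointers and yields
// LoadsState::Vectorize; a[0], a[2], a[4], a[6] is non-consecutive but may
// yield LoadsState::ScatterVectorize when the target supports legal masked
// gathers; otherwise the bundle falls back to LoadsState::Gather.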
3605 
3606 static bool clusterSortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
3607  const DataLayout &DL, ScalarEvolution &SE,
3608  SmallVectorImpl<unsigned> &SortedIndices) {
3609  assert(llvm::all_of(
3610  VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
3611  "Expected list of pointer operands.");
3612  // Map from bases to a vector of (Ptr, Offset, OrigIdx). We insert each Ptr
3613  // into the vector for its base, sort by offset, and return the sorted
3614  // indices so that related values end up next to one another.
3615  DenseMap<Value *, SmallVector<std::tuple<Value *, int, unsigned>, 4>> Bases;
3616  Bases[VL[0]].push_back(std::make_tuple(VL[0], 0U, 0U));
3617 
3618  unsigned Cnt = 1;
3619  for (Value *Ptr : VL.drop_front()) {
3620  bool Found = any_of(Bases, [&](auto &Base) {
3621  Optional<int> Diff =
3622  getPointersDiff(ElemTy, Base.first, ElemTy, Ptr, DL, SE,
3623  /*StrictCheck=*/true);
3624  if (!Diff)
3625  return false;
3626 
3627  Base.second.emplace_back(Ptr, *Diff, Cnt++);
3628  return true;
3629  });
3630 
3631  if (!Found) {
3632  // If we haven't found enough to usefully cluster, return early.
3633  if (Bases.size() > VL.size() / 2 - 1)
3634  return false;
3635 
3636  // Not found already - add a new Base
3637  Bases[Ptr].emplace_back(Ptr, 0, Cnt++);
3638  }
3639  }
3640 
3641  // For each of the bases, sort the pointers by offset and check if any of
3642  // the bases become consecutively allocated.
3643  bool AnyConsecutive = false;
3644  for (auto &Base : Bases) {
3645  auto &Vec = Base.second;
3646  if (Vec.size() > 1) {
3647  llvm::stable_sort(Vec, [](const std::tuple<Value *, int, unsigned> &X,
3648  const std::tuple<Value *, int, unsigned> &Y) {
3649  return std::get<1>(X) < std::get<1>(Y);
3650  });
3651  int InitialOffset = std::get<1>(Vec[0]);
3652  AnyConsecutive |= all_of(enumerate(Vec), [InitialOffset](auto &P) {
3653  return std::get<1>(P.value()) == int(P.index()) + InitialOffset;
3654  });
3655  }
3656  }
3657 
3658  // Fill SortedIndices array only if it looks worth-while to sort the ptrs.
3659  SortedIndices.clear();
3660  if (!AnyConsecutive)
3661  return false;
3662 
3663  for (auto &Base : Bases) {
3664  for (auto &T : Base.second)
3665  SortedIndices.push_back(std::get<2>(T));
3666  }
3667 
3668  assert(SortedIndices.size() == VL.size() &&
3669  "Expected SortedIndices to be the size of VL");
3670  return true;
3671 }
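// E.g., for VL = {p, p+2, p+1} with a common base p, the collected offsets
// {0, 2, 1} are stable-sorted to {0, 1, 2}, the run is detected as
// consecutive, and SortedIndices becomes {0, 2, 1}.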
3672 
3673 Optional<BoUpSLP::OrdersType>
3674 BoUpSLP::findPartiallyOrderedLoads(const BoUpSLP::TreeEntry &TE) {
3675  assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
3676  Type *ScalarTy = TE.Scalars[0]->getType();
3677 
3678  SmallVector<Value *> Ptrs;
3679  Ptrs.reserve(TE.Scalars.size());
3680  for (Value *V : TE.Scalars) {
3681  auto *L = dyn_cast<LoadInst>(V);
3682  if (!L || !L->isSimple())
3683  return None;
3684  Ptrs.push_back(L->getPointerOperand());
3685  }
3686 
3687  BoUpSLP::OrdersType Order;
3688  if (clusterSortPtrAccesses(Ptrs, ScalarTy, *DL, *SE, Order))
3689  return Order;
3690  return None;
3691 }
3692 
3693 Optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE,
3694  bool TopToBottom) {
3695  // No need to reorder if need to shuffle reuses, still need to shuffle the
3696  // node.
3697  if (!TE.ReuseShuffleIndices.empty()) {
3698  // Check if reuse shuffle indices can be improved by reordering.
3699  // For this, check that the reuse mask is "clustered", i.e. each scalar value
3700  // is used once in each submask of size <number_of_scalars>.
3701  // Example: 4 scalar values.
3702  // ReuseShuffleIndices mask: 0, 1, 2, 3, 3, 2, 0, 1 - clustered.
3703  // 0, 1, 2, 3, 3, 3, 1, 0 - not clustered, because
3704  // element 3 is used twice in the second submask.
3705  unsigned Sz = TE.Scalars.size();
3706  if (!ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices,
3707  Sz))
3708  return None;
3709  unsigned VF = TE.getVectorFactor();
3710  // Try build correct order for extractelement instructions.
3711  SmallVector<int> ReusedMask(TE.ReuseShuffleIndices.begin(),
3712  TE.ReuseShuffleIndices.end());
3713  if (TE.getOpcode() == Instruction::ExtractElement && !TE.isAltShuffle() &&
3714  all_of(TE.Scalars, [Sz](Value *V) {
3715  Optional<unsigned> Idx = getExtractIndex(cast<Instruction>(V));
3716  return Idx && *Idx < Sz;
3717  })) {
3718  SmallVector<int> ReorderMask(Sz, UndefMaskElem);
3719  if (TE.ReorderIndices.empty())
3720  std::iota(ReorderMask.begin(), ReorderMask.end(), 0);
3721  else
3722  inversePermutation(TE.ReorderIndices, ReorderMask);
3723  for (unsigned I = 0; I < VF; ++I) {
3724  int &Idx = ReusedMask[I];
3725  if (Idx == UndefMaskElem)
3726  continue;
3727  Value *V = TE.Scalars[ReorderMask[Idx]];
3728  Optional<unsigned> EI = getExtractIndex(cast<Instruction>(V));
3729  Idx = std::distance(ReorderMask.begin(), find(ReorderMask, *EI));
3730  }
3731  }
3732  // Build the order of the VF size; reuse shuffles need reordering and are
3733  // always of VF size.
3734  OrdersType ResOrder(VF);
3735  std::iota(ResOrder.begin(), ResOrder.end(), 0);
3736  auto *It = ResOrder.begin();
3737  for (unsigned K = 0; K < VF; K += Sz) {
3738  OrdersType CurrentOrder(TE.ReorderIndices);
3739  SmallVector<int> SubMask(makeArrayRef(ReusedMask).slice(K, Sz));
3740  if (SubMask.front() == UndefMaskElem)
3741  std::iota(SubMask.begin(), SubMask.end(), 0);
3742  reorderOrder(CurrentOrder, SubMask);
3743  transform(CurrentOrder, It, [K](unsigned Pos) { return Pos + K; });
3744  std::advance(It, Sz);
3745  }
3746  if (all_of(enumerate(ResOrder),
3747  [](const auto &Data) { return Data.index() == Data.value(); }))
3748  return {}; // Use identity order.
3749  return ResOrder;
3750  }
3751  if (TE.State == TreeEntry::Vectorize &&
3752  (isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) ||
3753  (TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp()))) &&
3754  !TE.isAltShuffle())
3755  return TE.ReorderIndices;
3756  if (TE.State == TreeEntry::NeedToGather) {
3757  // TODO: add analysis of other gather nodes with extractelement
3758  // instructions and other values/instructions, not only undefs.
3759  if (((TE.getOpcode() == Instruction::ExtractElement &&
3760  !TE.isAltShuffle()) ||
3761  (all_of(TE.Scalars,
3762  [](Value *V) {
3763  return isa<UndefValue, ExtractElementInst>(V);
3764  }) &&
3765  any_of(TE.Scalars,
3766  [](Value *V) { return isa<ExtractElementInst>(V); }))) &&
3767  all_of(TE.Scalars,
3768  [](Value *V) {
3769  auto *EE = dyn_cast<ExtractElementInst>(V);
3770  return !EE || isa<FixedVectorType>(EE->getVectorOperandType());
3771  }) &&
3772  allSameType(TE.Scalars)) {
3773  // Check that gather of extractelements can be represented as
3774  // just a shuffle of a single vector.
3775  OrdersType CurrentOrder;
3776  bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder);
3777  if (Reuse || !CurrentOrder.empty()) {
3778  if (!CurrentOrder.empty())
3779  fixupOrderingIndices(CurrentOrder);
3780  return CurrentOrder;
3781  }
3782  }
3783  if (Optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE))
3784  return CurrentOrder;
3785  if (TE.Scalars.size() >= 4)
3786  if (Optional<OrdersType> Order = findPartiallyOrderedLoads(TE))
3787  return Order;
3788  }
3789  return None;
3790 }
3791 
3792 /// Checks if the given mask is a "clustered" mask with the same clusters of
3793 /// size \p Sz, which are not identity submasks.
3794 static bool isRepeatedNonIdentityClusteredMask(ArrayRef<int> Mask,
3795  unsigned Sz) {
3796  ArrayRef<int> FirstCluster = Mask.slice(0, Sz);
3797  if (ShuffleVectorInst::isIdentityMask(FirstCluster))
3798  return false;
3799  for (unsigned I = 0, E = Mask.size(); I < E; I += Sz) {
3800  ArrayRef<int> Cluster = Mask.slice(I, Sz);
3801  if (Cluster != FirstCluster)
3802  return false;
3803  }
3804  return true;
3805 }
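// E.g., with Sz = 2: {1, 0, 1, 0} is repeated and non-identity, so true;
// {0, 1, 0, 1} starts with an identity submask, so false; and {1, 0, 0, 1}
// has differing clusters, so false.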
3806 
3807 void BoUpSLP::reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const {
3808  // For vectorized and non-clustered reused - just reorder reuses mask.
3809  const unsigned Sz = TE.Scalars.size();
3810  if (TE.State != TreeEntry::NeedToGather || !TE.ReorderIndices.empty() ||
3811  !ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices,
3812  Sz) ||
3813  !isRepeatedNonIdentityClusteredMask(TE.ReuseShuffleIndices, Sz)) {
3814  reorderReuses(TE.ReuseShuffleIndices, Mask);
3815  return;
3816  }
3817  // Try to improve gathered nodes with clustered reuses, if possible.
3818  reorderScalars(TE.Scalars, makeArrayRef(TE.ReuseShuffleIndices).slice(0, Sz));
3819  // Fill the reuses mask with the identity submasks.
3820  for (auto It = TE.ReuseShuffleIndices.begin(),
3821  End = TE.ReuseShuffleIndices.end();
3822  It != End; std::advance(It, Sz))
3823  std::iota(It, std::next(It + Sz), 0);
3824 }
3825 
3826 void BoUpSLP::reorderTopToBottom() {
3827  // Maps VF to the graph nodes.
3828  DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries;
3829  // ExtractElement gather nodes which can be vectorized and need to handle
3830  // their ordering.
3831  DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
3832 
3833  // AltShuffles can also have a preferred ordering that leads to fewer
3834  // instructions, e.g., the addsub instruction in x86.
3835  DenseMap<const TreeEntry *, OrdersType> AltShufflesToOrders;
3836 
3837  // Maps a TreeEntry to the reorder indices of external users.
3838  DenseMap<const TreeEntry *, SmallVector<OrdersType, 1>>
3839  ExternalUserReorderMap;
3840  // FIXME: Workaround for syntax error reported by MSVC buildbots.
3841  TargetTransformInfo &TTIRef = *TTI;
3842  // Find all reorderable nodes with the given VF.
3843  // Currently these are vectorized stores, loads, extracts + some gathering of
3844  // extracts.
3845  for_each(VectorizableTree, [this, &TTIRef, &VFToOrderedEntries,
3846  &GathersToOrders, &ExternalUserReorderMap,
3847  &AltShufflesToOrders](
3848  const std::unique_ptr<TreeEntry> &TE) {
3849  // Look for external users that will probably be vectorized.
3850  SmallVector<OrdersType, 1> ExternalUserReorderIndices =
3851  findExternalStoreUsersReorderIndices(TE.get());
3852  if (!ExternalUserReorderIndices.empty()) {
3853  VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
3854  ExternalUserReorderMap.try_emplace(TE.get(),
3855  std::move(ExternalUserReorderIndices));
3856  }
3857 
3858  // Patterns like [fadd,fsub] can be combined into a single instruction in
3859  // x86. Reordering them into [fsub,fadd] blocks this pattern. So we need
3860  // to take into account their order when looking for the most used order.
3861  if (TE->isAltShuffle()) {
3862  VectorType *VecTy =
3863  FixedVectorType::get(TE->Scalars[0]->getType(), TE->Scalars.size());
3864  unsigned Opcode0 = TE->getOpcode();
3865  unsigned Opcode1 = TE->getAltOpcode();
3866  // The opcode mask selects between the two opcodes.
3867  SmallBitVector OpcodeMask(TE->Scalars.size(), false);
3868  for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size()))
3869  if (cast<Instruction>(TE->Scalars[Lane])->getOpcode() == Opcode1)
3870  OpcodeMask.set(Lane);
3871  // If this pattern is supported by the target then we consider the order.
3872  if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) {
3873  VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
3874  AltShufflesToOrders.try_emplace(TE.get(), OrdersType());
3875  }
3876  // TODO: Check the reverse order too.
3877  }
3878 
3879  if (Optional<OrdersType> CurrentOrder =
3880  getReorderingData(*TE, /*TopToBottom=*/true)) {
3881  // Do not include ordering for nodes used in the alt opcode vectorization,
3882  // better to reorder them during the bottom-to-top stage. If we follow the
3883  // order here, it causes reordering of the whole graph, though actually it is
3884  // profitable just to reorder the subgraph that starts from the alternate
3885  // opcode vectorization node. Such nodes already end up with a shuffle
3886  // instruction, and it is just enough to change this shuffle rather than
3887  // rotate the scalars for the whole graph.
3888  unsigned Cnt = 0;
3889  const TreeEntry *UserTE = TE.get();
3890  while (UserTE && Cnt < RecursionMaxDepth) {
3891  if (UserTE->UserTreeIndices.size() != 1)
3892  break;
3893  if (all_of(UserTE->UserTreeIndices, [](const EdgeInfo &EI) {
3894  return EI.UserTE->State == TreeEntry::Vectorize &&
3895  EI.UserTE->isAltShuffle() && EI.UserTE->Idx != 0;
3896  }))
3897  return;
3898  UserTE = UserTE->UserTreeIndices.back().UserTE;
3899  ++Cnt;
3900  }
3901  VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get());
3902  if (TE->State != TreeEntry::Vectorize || !TE->ReuseShuffleIndices.empty())
3903  GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
3904  }
3905  });
3906 
3907  // Reorder the graph nodes according to their vectorization factor.
3908  for (unsigned VF = VectorizableTree.front()->Scalars.size(); VF > 1;
3909  VF /= 2) {
3910  auto It = VFToOrderedEntries.find(VF);
3911  if (It == VFToOrderedEntries.end())
3912  continue;
3913  // Try to find the most profitable order. We are just looking for the most
3914  // used order and reorder the scalar elements in the nodes according to this
3915  // most used order.
3916  ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef();
3917  // All operands are reordered and used only in this node - propagate the
3918  // most used order to the user node.
3919  MapVector<OrdersType, unsigned,
3920  DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
3921  OrdersUses;
3922  SmallPtrSet<const TreeEntry *, 4> VisitedOps;
3923  for (const TreeEntry *OpTE : OrderedEntries) {
3924  // No need to reorder these nodes; we still need to extend and use a shuffle,
3925  // just merging the reordering shuffle and the reuse shuffle.
3926  if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE))
3927  continue;
3928  // Count the number of uses of each order.
3929  const auto &Order = [OpTE, &GathersToOrders,
3930  &AltShufflesToOrders]() -> const OrdersType & {
3931  if (OpTE->State == TreeEntry::NeedToGather ||
3932  !OpTE->ReuseShuffleIndices.empty()) {
3933  auto It = GathersToOrders.find(OpTE);
3934  if (It != GathersToOrders.end())
3935  return It->second;
3936  }
3937  if (OpTE->isAltShuffle()) {
3938  auto It = AltShufflesToOrders.find(OpTE);
3939  if (It != AltShufflesToOrders.end())
3940  return It->second;
3941  }
3942  return OpTE->ReorderIndices;
3943  }();
3944  // First consider the order of the external scalar users.
3945  auto It = ExternalUserReorderMap.find(OpTE);
3946  if (It != ExternalUserReorderMap.end()) {
3947  const auto &ExternalUserReorderIndices = It->second;
3948  // If the OpTE vector factor != number of scalars - use natural order,
3949  // as it is an attempt to reorder a node with reused scalars but with
3950  // external uses.
3951  if (OpTE->getVectorFactor() != OpTE->Scalars.size()) {
3952  OrdersUses.insert(std::make_pair(OrdersType(), 0)).first->second +=
3953  ExternalUserReorderIndices.size();
3954  } else {
3955  for (const OrdersType &ExtOrder : ExternalUserReorderIndices)
3956  ++OrdersUses.insert(std::make_pair(ExtOrder, 0)).first->second;
3957  }
3958  // No other useful reorder data in this entry.
3959  if (Order.empty())
3960  continue;
3961  }
3962  // Stores actually store the mask, not the order; we need to invert it.
3963  if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
3964  OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
3965  SmallVector<int> Mask;
3966  inversePermutation(Order, Mask);
3967  unsigned E = Order.size();
3968  OrdersType CurrentOrder(E, E);
3969  transform(Mask, CurrentOrder.begin(), [E](int Idx) {
3970  return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx);
3971  });
3972  fixupOrderingIndices(CurrentOrder);
3973  ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second;
3974  } else {
3975  ++OrdersUses.insert(std::make_pair(Order, 0)).first->second;
3976  }
3977  }
3978  // Set order of the user node.
3979  if (OrdersUses.empty())
3980  continue;
3981  // Choose the most used order.
3982  ArrayRef<unsigned> BestOrder = OrdersUses.front().first;
3983  unsigned Cnt = OrdersUses.front().second;
3984  for (const auto &Pair : drop_begin(OrdersUses)) {
3985  if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
3986  BestOrder = Pair.first;
3987  Cnt = Pair.second;
3988  }
3989  }
3990  // Set order of the user node.
3991  if (BestOrder.empty())
3992  continue;
3993  SmallVector<int> Mask;
3994  inversePermutation(BestOrder, Mask);
3995  SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem);
3996  unsigned E = BestOrder.size();
3997  transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
3998  return I < E ? static_cast<int>(I) : UndefMaskElem;
3999  });
4000  // Do an actual reordering, if profitable.
4001  for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
4002  // Just do the reordering for the nodes with the given VF.
4003  if (TE->Scalars.size() != VF) {
4004  if (TE->ReuseShuffleIndices.size() == VF) {
4005  // Need to reorder the reuses masks of the operands with smaller VF to
4006  // be able to find the match between the graph nodes and scalar
4007  // operands of the given node during vectorization/cost estimation.
4008  assert(all_of(TE->UserTreeIndices,
4009  [VF, &TE](const EdgeInfo &EI) {
4010  return EI.UserTE->Scalars.size() == VF ||
4011  EI.UserTE->Scalars.size() ==
4012  TE->Scalars.size();
4013  }) &&
4014  "All users must be of VF size.");
4015  // Update ordering of the operands with the smaller VF than the given
4016  // one.
4017  reorderNodeWithReuses(*TE, Mask);
4018  }
4019  continue;
4020  }
4021  if (TE->State == TreeEntry::Vectorize &&
4022  isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst,
4023  InsertElementInst>(TE->getMainOp()) &&
4024  !TE->isAltShuffle()) {
4025  // Build correct orders for extract{element,value}, loads and
4026  // stores.
4027  reorderOrder(TE->ReorderIndices, Mask);
4028  if (isa<InsertElementInst, StoreInst>(TE->getMainOp()))
4029  TE->reorderOperands(Mask);
4030  } else {
4031  // Reorder the node and its operands.
4032  TE->reorderOperands(Mask);
4033  assert(TE->ReorderIndices.empty() &&
4034  "Expected empty reorder sequence.");
4035  reorderScalars(TE->Scalars, Mask);
4036  }
4037  if (!TE->ReuseShuffleIndices.empty()) {
4038  // Apply reversed order to keep the original ordering of the reused
4039  // elements to avoid extra reorder indices shuffling.
4040  OrdersType CurrentOrder;
4041  reorderOrder(CurrentOrder, MaskOrder);
4042  SmallVector<int> NewReuses;
4043  inversePermutation(CurrentOrder, NewReuses);
4044  addMask(NewReuses, TE->ReuseShuffleIndices);
4045  TE->ReuseShuffleIndices.swap(NewReuses);
4046  }
4047  }
4048  }
4049 }
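// For instance, if most nodes of a given VF (or their external store users)
// vote for the order {1, 0, 3, 2}, that order wins above and every entry of
// that VF is rewritten with the corresponding mask, letting the shuffles
// cancel out along the use-def chains.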
4050 
4051 bool BoUpSLP::canReorderOperands(
4052  TreeEntry *UserTE, SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
4053  ArrayRef<TreeEntry *> ReorderableGathers,
4054  SmallVectorImpl<TreeEntry *> &GatherOps) {
4055  for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) {
4056  if (any_of(Edges, [I](const std::pair<unsigned, TreeEntry *> &OpData) {
4057  return OpData.first == I &&
4058  OpData.second->State == TreeEntry::Vectorize;
4059  }))
4060  continue;
4061  if (TreeEntry *TE = getVectorizedOperand(UserTE, I)) {
4062  // Do not reorder if operand node is used by many user nodes.
4063  if (any_of(TE->UserTreeIndices,
4064  [UserTE](const EdgeInfo &EI) { return EI.UserTE != UserTE; }))
4065  return false;
4066  // Add the node to the list of the ordered nodes with the identity
4067  // order.
4068  Edges.emplace_back(I, TE);
4069  // Add ScatterVectorize nodes to the list of operands, where just
4070  // reordering of the scalars is required. Similar to the gathers, so
4071  // simply add to the list of gathered ops.
4072  // If there are reused scalars, process this node as a regular vectorize
4073  // node, just reorder reuses mask.
4074  if (TE->State != TreeEntry::Vectorize && TE->ReuseShuffleIndices.empty())
4075  GatherOps.push_back(TE);
4076  continue;
4077  }
4078  TreeEntry *Gather = nullptr;
4079  if (count_if(ReorderableGathers,
4080  [&Gather, UserTE, I](TreeEntry *TE) {
4081  assert(TE->State != TreeEntry::Vectorize &&
4082  "Only non-vectorized nodes are expected.");
4083  if (any_of(TE->UserTreeIndices,
4084  [UserTE, I](const EdgeInfo &EI) {
4085  return EI.UserTE == UserTE && EI.EdgeIdx == I;
4086  })) {
4087  assert(TE->isSame(UserTE->getOperand(I)) &&
4088  "Operand entry does not match operands.");
4089  Gather = TE;
4090  return true;
4091  }
4092  return false;
4093  }) > 1 &&
4094  !all_of(UserTE->getOperand(I), isConstant))
4095  return false;
4096  if (Gather)
4097  GatherOps.push_back(Gather);
4098  }
4099  return true;
4100 }
4101 
4102 void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
4103  SetVector<TreeEntry *> OrderedEntries;
4104  DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
4105  // Find all reorderable leaf nodes with the given VF.
4106  // Currently these are vectorized loads, extracts without alternate operands +
4107  // some gathering of extracts.
4108  SmallVector<TreeEntry *> NonVectorized;
4109  for_each(VectorizableTree, [this, &OrderedEntries, &GathersToOrders,
4110  &NonVectorized](
4111  const std::unique_ptr<TreeEntry> &TE) {
4112  if (TE->State != TreeEntry::Vectorize)
4113  NonVectorized.push_back(TE.get());
4114  if (Optional<OrdersType> CurrentOrder =
4115  getReorderingData(*TE, /*TopToBottom=*/false)) {
4116  OrderedEntries.insert(TE.get());
4117  if (TE->State != TreeEntry::Vectorize || !TE->ReuseShuffleIndices.empty())
4118  GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
4119  }
4120  });
4121 
4122  // 1. Propagate order to the graph nodes, which use only reordered nodes.
4123  // I.e., if the node has operands that are reordered, try to keep at least
4124  // one operand in the natural order and reorder the others + reorder the
4125  // user node itself.
4126  SmallPtrSet<const TreeEntry *, 4> Visited;
4127  while (!OrderedEntries.empty()) {
4128  // 1. Filter out only reordered nodes.
4129  // 2. If the entry has multiple uses - skip it and jump to the next node.
4130  DenseMap<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users;
4131  SmallVector<TreeEntry *> Filtered;
4132  for (TreeEntry *TE : OrderedEntries) {
4133  if (!(TE->State == TreeEntry::Vectorize ||
4134  (TE->State == TreeEntry::NeedToGather &&
4135  GathersToOrders.count(TE))) ||
4136  TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
4137  !all_of(drop_begin(TE->UserTreeIndices),
4138  [TE](const EdgeInfo &EI) {
4139  return EI.UserTE == TE->UserTreeIndices.front().UserTE;
4140  }) ||
4141  !Visited.insert(TE).second) {
4142  Filtered.push_back(TE);
4143  continue;
4144  }
4145  // Build a map between user nodes and their operand order to speed up the
4146  // search. The graph currently does not provide this dependency directly.
4147  for (EdgeInfo &EI : TE->UserTreeIndices) {
4148  TreeEntry *UserTE = EI.UserTE;
4149  auto It = Users.find(UserTE);
4150  if (It == Users.end())
4151  It = Users.insert({UserTE, {}}).first;
4152  It->second.emplace_back(EI.EdgeIdx, TE);
4153  }
4154  }
4155  // Erase filtered entries.
4156  for_each(Filtered,
4157  [&OrderedEntries](TreeEntry *TE) { OrderedEntries.remove(TE); });
4158  SmallVector<
4159  std::pair<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>>>
4160  UsersVec(Users.begin(), Users.end());
4161  sort(UsersVec, [](const auto &Data1, const auto &Data2) {
4162  return Data1.first->Idx > Data2.first->Idx;
4163  });
4164  for (auto &Data : UsersVec) {
4165  // Check that operands are used only in the User node.
4166  SmallVector<TreeEntry *> GatherOps;
4167  if (!canReorderOperands(Data.first, Data.second, NonVectorized,
4168  GatherOps)) {
4169  for_each(Data.second,
4170  [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
4171  OrderedEntries.remove(Op.second);
4172  });
4173  continue;
4174  }
4175  // All operands are reordered and used only in this node - propagate the
4176  // most used order to the user node.
4177  MapVector<OrdersType, unsigned,
4178  DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
4179  OrdersUses;
4180  // Do the analysis for each tree entry only once, otherwise the order of
4181  // the same node may be considered several times, though it might not be
4182  // profitable.
4183  SmallPtrSet<const TreeEntry *, 4> VisitedOps;
4184  SmallPtrSet<const TreeEntry *, 4> VisitedUsers;
4185  for (const auto &Op : Data.second) {
4186  TreeEntry *OpTE = Op.second;
4187  if (!VisitedOps.insert(OpTE).second)
4188  continue;
4189  if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE))
4190  continue;
4191  const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & {
4192  if (OpTE->State == TreeEntry::NeedToGather ||
4193  !OpTE->ReuseShuffleIndices.empty())
4194  return GathersToOrders.find(OpTE)->second;
4195  return OpTE->ReorderIndices;
4196  }();
4197  unsigned NumOps = count_if(
4198  Data.second, [OpTE](const std::pair<unsigned, TreeEntry *> &P) {
4199  return P.second == OpTE;
4200  });
4201  // Stores actually store the mask, not the order; we need to invert it.
4202  if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
4203  OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
4204  SmallVector<int> Mask;
4205  inversePermutation(Order, Mask);
4206  unsigned E = Order.size();
4207  OrdersType CurrentOrder(E, E);
4208  transform(Mask, CurrentOrder.begin(), [E](int Idx) {
4209  return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx);
4210  });
4211  fixupOrderingIndices(CurrentOrder);
4212  OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second +=
4213  NumOps;
4214  } else {
4215  OrdersUses.insert(std::make_pair(Order, 0)).first->second += NumOps;
4216  }
4217  auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0));
4218  const auto &&AllowsReordering = [IgnoreReorder, &GathersToOrders](
4219  const TreeEntry *TE) {
4220  if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
4221  (TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) ||
4222  (IgnoreReorder && TE->Idx == 0))
4223  return true;
4224  if (TE->State == TreeEntry::NeedToGather) {
4225  auto It = GathersToOrders.find(TE);
4226  if (It != GathersToOrders.end())
4227  return !It->second.empty();
4228  return true;
4229  }
4230  return false;
4231  };
4232  for (const EdgeInfo &EI : OpTE->UserTreeIndices) {
4233  TreeEntry *UserTE = EI.UserTE;
4234  if (!VisitedUsers.insert(UserTE).second)
4235  continue;
4236  // May reorder user node if it requires reordering, has reused
4237  // scalars, is an alternate op vectorize node or its op nodes require
4238  // reordering.
4239  if (AllowsReordering(UserTE))
4240  continue;
4241  // Check if users allow reordering.
4242  // Currently look up just 1 level of operands to avoid increase of
4243  // the compile time.
4244  // It is profitable to reorder if definitely more operands allow
4245  // reordering than keep the natural order.
4246  ArrayRef<std::pair<unsigned, TreeEntry *>> Ops = Data.second;
4247  if (static_cast<unsigned>(count_if(
4248  Ops, [UserTE, &AllowsReordering](
4249  const std::pair<unsigned, TreeEntry *> &Op) {
4250  return AllowsReordering(Op.second) &&
4251  all_of(Op.second->UserTreeIndices,
4252  [UserTE](const EdgeInfo &EI) {
4253  return EI.UserTE == UserTE;
4254  });
4255  })) <= Ops.size() / 2)
4256  ++Res.first->second;
4257  }
4258  }
4259  // If no orders - skip current nodes and jump to the next one, if any.
4260  if (OrdersUses.empty()) {
4261  for_each(Data.second,
4262  [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
4263  OrderedEntries.remove(Op.second);
4264  });
4265  continue;
4266  }
4267  // Choose the best order.
4268  ArrayRef<unsigned> BestOrder = OrdersUses.front().first;
4269  unsigned Cnt = OrdersUses.front().second;
4270  for (const auto &Pair : drop_begin(OrdersUses)) {
4271  if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
4272  BestOrder = Pair.first;
4273  Cnt = Pair.second;
4274  }
4275  }
4276  // Set order of the user node (reordering of operands and user nodes).
4277  if (BestOrder.empty()) {
4278  for_each(Data.second,
4279  [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
4280  OrderedEntries.remove(Op.second);
4281  });
4282  continue;
4283  }
4284  // Erase operands from OrderedEntries list and adjust their orders.
4285  VisitedOps.clear();
4286  SmallVector<int> Mask;
4287  inversePermutation(BestOrder, Mask);
4288  SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem);
4289  unsigned E = BestOrder.size();
4290  transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
4291  return I < E ? static_cast<int>(I) : UndefMaskElem;
4292  });
4293  for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) {
4294  TreeEntry *TE = Op.second;
4295  OrderedEntries.remove(TE);
4296  if (!VisitedOps.insert(TE).second)
4297  continue;
4298  if (TE->ReuseShuffleIndices.size() == BestOrder.size()) {
4299  reorderNodeWithReuses(*TE, Mask);
4300  continue;
4301  }
4302  // Gathers are processed separately.
4303  if (TE->State != TreeEntry::Vectorize)
4304  continue;
4305  assert((BestOrder.size() == TE->ReorderIndices.size() ||
4306  TE->ReorderIndices.empty()) &&
4307  "Non-matching sizes of user/operand entries.");
4308  reorderOrder(TE->ReorderIndices, Mask);
4309  if (IgnoreReorder && TE == VectorizableTree.front().get())
4310  IgnoreReorder = false;
4311  }
4312  // For gathers just need to reorder its scalars.
4313  for (TreeEntry *Gather : GatherOps) {
4314  assert(Gather->ReorderIndices.empty() &&
4315  "Unexpected reordering of gathers.");
4316  if (!Gather->ReuseShuffleIndices.empty()) {
4317  // Just reorder reuses indices.
4318  reorderReuses(Gather->ReuseShuffleIndices, Mask);
4319  continue;
4320  }
4321  reorderScalars(Gather->Scalars, Mask);
4322  OrderedEntries.remove(Gather);
4323  }
4324  // Reorder operands of the user node and set the ordering for the user
4325  // node itself.
4326  if (Data.first->State != TreeEntry::Vectorize ||
4327  !isa<ExtractElementInst, ExtractValueInst, LoadInst>(
4328  Data.first->getMainOp()) ||
4329  Data.first->isAltShuffle())
4330  Data.first->reorderOperands(Mask);
4331  if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) ||
4332  Data.first->isAltShuffle()) {
4333  reorderScalars(Data.first->Scalars, Mask);
4334  reorderOrder(Data.first->ReorderIndices, MaskOrder);
4335  if (Data.first->ReuseShuffleIndices.empty() &&
4336  !Data.first->ReorderIndices.empty() &&
4337  !Data.first->isAltShuffle()) {
4338  // Insert user node to the list to try to sink reordering deeper in
4339  // the graph.
4340  OrderedEntries.insert(Data.first);
4341  }
4342  } else {
4343  reorderOrder(Data.first->ReorderIndices, Mask);
4344  }
4345  }
4346  }
4347  // If the reordering is unnecessary, just remove the reorder.
4348  if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() &&
4349  VectorizableTree.front()->ReuseShuffleIndices.empty())
4350  VectorizableTree.front()->ReorderIndices.clear();
4351 }
4352 
4353 void BoUpSLP::buildExternalUses(
4354  const ExtraValueToDebugLocsMap &ExternallyUsedValues) {
4355  // Collect the values that we need to extract from the tree.
4356  for (auto &TEPtr : VectorizableTree) {
4357  TreeEntry *Entry = TEPtr.get();
4358 
4359  // No need to handle users of gathered values.
4360  if (Entry->State == TreeEntry::NeedToGather)
4361  continue;
4362 
4363  // For each lane:
4364  for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
4365  Value *Scalar = Entry->Scalars[Lane];
4366  int FoundLane = Entry->findLaneForValue(Scalar);
4367 
4368  // Check if the scalar is externally used as an extra arg.
4369  auto ExtI = ExternallyUsedValues.find(Scalar);
4370  if (ExtI != ExternallyUsedValues.end()) {
4371  LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
4372  << Lane << " from " << *Scalar << ".\n");
4373  ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
4374  }
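  // A null user in the ExternalUser entry above marks the scalar as used by
  // an extra argument from outside the tree; the extract for it is emitted
  // later, when the tree is vectorized.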
4375  for (User *U : Scalar->users()) {
4376  LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
4377 
4378  Instruction *UserInst = dyn_cast<Instruction>(U);
4379  if (!UserInst)
4380  continue;
4381 
4382  if (isDeleted(UserInst))
4383  continue;
4384 
4385  // Skip in-tree scalars that become vectors.
4386  if (TreeEntry *UseEntry = getTreeEntry(U)) {
4387  Value *UseScalar = UseEntry->Scalars[0];
4388  // Some in-tree scalars will remain as scalar in vectorized
4389  // instructions. If that is the case, the one in Lane 0 will
4390  // be used.
4391  if (UseScalar != U ||
4392  UseEntry->State == TreeEntry::ScatterVectorize ||
4393  !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
4394  LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
4395  << ".\n");
4396  assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state");
4397  continue;
4398  }
4399  }
4400 
4401  // Ignore users in the user ignore list.
4402  if (UserIgnoreList && UserIgnoreList->contains(UserInst))
4403  continue;
4404 
4405  LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
4406  << Lane << " from " << *Scalar << ".\n");
4407  ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane));
4408  }
4409  }
4410  }
4411 }
4412 
4413 DenseMap<Value *, SmallVector<StoreInst *, 4>>
4414 BoUpSLP::collectUserStores(const BoUpSLP::TreeEntry *TE) const {
4415  DenseMap<Value *, SmallVector<StoreInst *, 4>> PtrToStoresMap;
4416  for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) {
4417  Value *V = TE->Scalars[Lane];
4418  // To save compilation time we don't visit if we have too many users.
4419  static constexpr unsigned UsersLimit = 4;
4420  if (V->hasNUsesOrMore(UsersLimit))
4421  break;
4422 
4423  // Collect stores per pointer object.
4424  for (User *U : V->users()) {
4425  auto *SI = dyn_cast<StoreInst>(U);
4426  if (SI == nullptr || !SI->isSimple() ||
4427  !isValidElementType(SI->getValueOperand()->getType()))
4428  continue;
4429  // Skip the entry if it is already in the tree.
4430  if (getTreeEntry(U))
4431  continue;
4432 
4433  Value *Ptr = getUnderlyingObject(SI->getPointerOperand());
4434  auto &StoresVec = PtrToStoresMap[Ptr];
4435  // For now, just keep one store per pointer object per lane.
4436  // TODO: Extend this to support multiple stores per pointer per lane.
4437  if (StoresVec.size() > Lane)
4438  continue;
4439  // Skip if in different BBs.
4440  if (!StoresVec.empty() &&
4441  SI->getParent() != StoresVec.back()->getParent())
4442  continue;
4443  // Make sure that the stores are of the same type.
4444  if (!StoresVec.empty() &&
4445  SI->getValueOperand()->getType() !=
4446  StoresVec.back()->getValueOperand()->getType())
4447  continue;
4448  StoresVec.push_back(SI);
4449  }
4450  }
4451  return PtrToStoresMap;
4452 }
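// As a sketch of what collectUserStores() produces: if two lanes of TE are
// each stored exactly once through GEPs sharing one underlying object, the
// map gets a single entry mapping that object to the two stores, in lane
// order; the callers below then test whether such a group can form a vector.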
4453 
4454 bool BoUpSLP::canFormVector(const SmallVector<StoreInst *, 4> &StoresVec,
4455  OrdersType &ReorderIndices) const {
4456  // We check whether the stores in StoresVec can form a vector by sorting
4457  // them and checking whether they are consecutive.
4458 
4459  // To avoid calling getPointersDiff() while sorting we create a vector of
4460  // pairs {store, offset from first} and sort this instead.
4461  SmallVector<std::pair<StoreInst *, int>, 4> StoreOffsetVec(StoresVec.size());
4462  StoreInst *S0 = StoresVec[0];
4463  StoreOffsetVec[0] = {S0, 0};
4464  Type *S0Ty = S0->getValueOperand()->getType();
4465  Value *S0Ptr = S0->getPointerOperand();
4466  for (unsigned Idx : seq<unsigned>(1, StoresVec.size())) {
4467  StoreInst *SI = StoresVec[Idx];
4468  Optional<int> Diff =
4469  getPointersDiff(S0Ty, S0Ptr, SI->getValueOperand()->getType(),
4470  SI->getPointerOperand(), *DL, *SE,
4471  /*StrictCheck=*/true);
4472  // We failed to compare the pointers so just abandon this StoresVec.
4473  if (!Diff)
4474  return false;
4475  StoreOffsetVec[Idx] = {StoresVec[Idx], *Diff};
4476  }
4477 
4478  // Sort the vector based on the pointers. We create a copy because we may
4479  // need the original later for calculating the reorder (shuffle) indices.
4480  stable_sort(StoreOffsetVec, [](const std::pair<StoreInst *, int> &Pair1,
4481  const std::pair<StoreInst *, int> &Pair2) {
4482  int Offset1 = Pair1.second;
4483  int Offset2 = Pair2.second;
4484  return Offset1 < Offset2;
4485  });
4486 
4487  // Check if the stores are consecutive by checking if their difference is 1.
4488  for (unsigned Idx : seq<unsigned>(1, StoreOffsetVec.size()))
4489  if (StoreOffsetVec[Idx].second != StoreOffsetVec[Idx - 1].second + 1)
4490  return false;
4491 
4492  // Calculate the shuffle indices according to their offset against the sorted
4493  // StoreOffsetVec.
4494  ReorderIndices.reserve(StoresVec.size());
4495  for (StoreInst *SI : StoresVec) {
4496  unsigned Idx = find_if(StoreOffsetVec,
4497  [SI](const std::pair<StoreInst *, int> &Pair) {
4498  return Pair.first == SI;
4499  }) -
4500  StoreOffsetVec.begin();
4501  ReorderIndices.push_back(Idx);
4502  }
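  // For example, for stores {S0, S1, S2, S3} at element offsets {0, 2, 1, 3}
  // from S0, the sorted order is {S0, S2, S1, S3}, and the loop above yields
  // ReorderIndices = {0, 2, 1, 3}: each store's position within the sorted,
  // consecutive vector.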
4503  // Identity order (e.g., {0,1,2,3}) is modeled as an empty OrdersType in
4504  // reorderTopToBottom() and reorderBottomToTop(), so we are following the
4505  // same convention here.
4506  auto IsIdentityOrder = [](const OrdersType &Order) {
4507  for (unsigned Idx : seq<unsigned>(0, Order.size()))
4508  if (Idx != Order[Idx])
4509  return false;
4510  return true;
4511  };
4512  if (IsIdentityOrder(ReorderIndices))
4513  ReorderIndices.clear();
4514 
4515  return true;
4516 }
4517 
4518 #ifndef NDEBUG
4519 LLVM_DUMP_METHOD static void dumpOrder(const BoUpSLP::OrdersType &Order) {
4520  for (unsigned Idx : Order)
4521  dbgs() << Idx << ", ";
4522  dbgs() << "\n";
4523 }
4524 #endif
4525 
4526 SmallVector<BoUpSLP::OrdersType, 1>
4527 BoUpSLP::findExternalStoreUsersReorderIndices(TreeEntry *TE) const {
4528  unsigned NumLanes = TE->Scalars.size();
4529 
4530  DenseMap<Value *, SmallVector<StoreInst *, 4>> PtrToStoresMap =
4531  collectUserStores(TE);
4532 
4533  // Holds the reorder indices for each candidate store vector that is a user of
4534  // the current TreeEntry.
4535  SmallVector<OrdersType, 1> ExternalReorderIndices;
4536 
4537  // Now inspect the stores collected per pointer and look for vectorization
4538  // candidates. For each candidate calculate the reorder index vector and push
4539  // it into `ExternalReorderIndices`.
4540  for (const auto &Pair : PtrToStoresMap) {
4541  auto &StoresVec = Pair.second;
4542  // If we have fewer than NumLanes stores, then we can't form a vector.
4543  if (StoresVec.size() != NumLanes)
4544  continue;
4545 
4546  // If the stores are not consecutive then abandon this StoresVec.
4547  OrdersType ReorderIndices;
4548  if (!canFormVector(StoresVec, ReorderIndices))
4549  continue;
4550 
4551  // We now know that the scalars in StoresVec can form a vector instruction,
4552  // so set the reorder indices.
4553  ExternalReorderIndices.push_back(ReorderIndices);
4554  }
4555  return ExternalReorderIndices;
4556 }
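// The orders returned here feed reorderTopToBottom(), where they act as
// additional candidate orders when voting on the most common reordering for
// each node, so a tree that feeds consecutive external stores tends to adopt
// the stores' order.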
4557 
4558 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
4559  const SmallDenseSet<Value *> &UserIgnoreLst) {
4560  deleteTree();
4561  UserIgnoreList = &UserIgnoreLst;
4562  if (!allSameType(Roots))
4563  return;
4564  buildTree_rec(Roots, 0, EdgeInfo());
4565 }
4566 
4567 void BoUpSLP::buildTree(ArrayRef<Value *> Roots) {
4568  deleteTree();
4569  if (!allSameType(Roots))
4570  return;
4571  buildTree_rec(Roots, 0, EdgeInfo());
4572 }
4573 
4574 /// \return true if the specified list of values has only one instruction that
4575 /// requires scheduling, false otherwise.
4576 #ifndef NDEBUG
4577 static bool needToScheduleSingleInstruction(ArrayRef<Value *> VL) {
4578  Value *NeedsScheduling = nullptr;
4579  for (Value *V : VL) {
4580  if (doesNotNeedToBeScheduled(V))
4581  continue;
4582  if (!NeedsScheduling) {
4583  NeedsScheduling = V;
4584  continue;
4585  }
4586  return false;
4587  }
4588  return NeedsScheduling;
4589 }
4590 #endif
4591 
4592 /// Generates a key/subkey pair for the given value to provide effective
4593 /// sorting of the values and better detection of vectorizable value
4594 /// sequences. The keys are used for sorting the values themselves, the
4595 /// subkeys for sorting within value subgroups.
4596 static std::pair<size_t, size_t> generateKeySubkey(
4597  Value *V, const TargetLibraryInfo *TLI,
4598  function_ref<hash_code(size_t, LoadInst *)> LoadsSubkeyGenerator,
4599  bool AllowAlternate) {
4600  hash_code Key = hash_value(V->getValueID() + 2);
4601  hash_code SubKey = hash_value(0);
4602  // Sort the loads by the distance between the pointers.
4603  if (auto *LI = dyn_cast<LoadInst>(V)) {
4604  Key = hash_combine(hash_value(Instruction::Load), Key);
4605  if (LI->isSimple())
4606  SubKey = hash_value(LoadsSubkeyGenerator(Key, LI));
4607  else
4608  SubKey = hash_value(LI);
4609  } else if (isVectorLikeInstWithConstOps(V)) {
4610  // Sort extracts by the vector operands.
4611  if (isa<ExtractElementInst, UndefValue>(V))
4612  Key = hash_value(Value::UndefValueVal + 1);
4613  if (auto *EI = dyn_cast<ExtractElementInst>(V)) {
4614  if (!isUndefVector(EI->getVectorOperand()) &&
4615  !isa<UndefValue>(EI->getIndexOperand()))
4616  SubKey = hash_value(EI->getVectorOperand());
4617  }
4618  } else if (auto *I = dyn_cast<Instruction>(V)) {
4619  // Sort other instructions just by the opcodes except for CMPInst.
4620  // For CMP also sort by the predicate kind.
4621  if ((isa<BinaryOperator, CastInst>(I)) &&
4622  isValidForAlternation(I->getOpcode())) {
4623  if (AllowAlternate)
4624  Key = hash_value(isa<BinaryOperator>(I) ? 1 : 0);
4625  else
4626  Key = hash_combine(hash_value(I->getOpcode()), Key);
4627  SubKey = hash_combine(
4628  hash_value(I->getOpcode()), hash_value(I->getType()),
4629  hash_value(isa<BinaryOperator>(I)
4630  ? I->getType()
4631  : cast<CastInst>(I)->getOperand(0)->getType()));
4632  // For casts, look through the only operand to improve compile time.
4633  if (isa<CastInst>(I)) {
4634  std::pair<size_t, size_t> OpVals =
4635  generateKeySubkey(I->getOperand(0), TLI, LoadsSubkeyGenerator,
4636  /*AllowAlternate=*/true);
4637  Key = hash_combine(OpVals.first, Key);
4638  SubKey = hash_combine(OpVals.first, SubKey);
4639  }
4640  } else if (auto *CI = dyn_cast<CmpInst>(I)) {
4641  CmpInst::Predicate Pred = CI->getPredicate();
4642  if (CI->isCommutative())
4643  Pred = std::min(Pred, CmpInst::getInversePredicate(Pred));
4644  CmpInst::Predicate SwapPred = CmpInst::getSwappedPredicate(Pred);
4645  SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Pred),
4646  hash_value(SwapPred),
4647  hash_value(CI->getOperand(0)->getType()));
4648  } else if (auto *Call = dyn_cast<CallInst>(I)) {
4649  Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, TLI);
4650  if (isTriviallyVectorizable(ID)) {
4651  SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(ID));
4652  } else if (!VFDatabase(*Call).getMappings(*Call).empty()) {
4653  SubKey = hash_combine(hash_value(I->getOpcode()),
4654  hash_value(Call->getCalledFunction()));
4655  } else {
4656  Key = hash_combine(hash_value(Call), Key);
4657  SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Call));
4658  }
4659  for (const CallBase::BundleOpInfo &Op : Call->bundle_op_infos())
4660  SubKey = hash_combine(hash_value(Op.Begin), hash_value(Op.End),
4661  hash_value(Op.Tag), SubKey);
4662  } else if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) {
4663  if (Gep->getNumOperands() == 2 && isa<ConstantInt>(Gep->getOperand(1)))
4664  SubKey = hash_value(Gep->getPointerOperand());
4665  else
4666  SubKey = hash_value(Gep);
4667  } else if (BinaryOperator::isIntDivRem(I->getOpcode()) &&
4668  !isa<ConstantInt>(I->getOperand(1))) {
4669  // Do not try to vectorize instructions with potentially high cost.
4670  SubKey = hash_value(I);
4671  } else {
4672  SubKey = hash_value(I->getOpcode());
4673  }
4674  Key = hash_combine(hash_value(I->getParent()), Key);
4675  }
4676  return std::make_pair(Key, SubKey);
4677 }
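// As a rough illustration: two simple loads get the same Key and receive
// SubKeys from LoadsSubkeyGenerator (callers typically base these on the
// distance between the load pointers), so nearby loads fall into the same
// subgroup, while an integer div/rem with a non-constant divisor hashes to a
// per-instruction SubKey and never groups with anything else.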
4678 
4679 /// Checks if the specified instruction \p I is an alternate operation for
4680 /// the given \p MainOp and \p AltOp instructions.
4681 static bool isAlternateInstruction(const Instruction *I,
4682  const Instruction *MainOp,
4683  const Instruction *AltOp);
4684 
4685 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
4686  const EdgeInfo &UserTreeIdx) {
4687  assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
4688 
4689  SmallVector<int> ReuseShuffleIndicies;
4690  SmallVector<Value *> UniqueValues;
4691  auto &&TryToFindDuplicates = [&VL, &ReuseShuffleIndicies, &UniqueValues,
4692  &UserTreeIdx,
4693  this](const InstructionsState &S) {
4694  // Check that every instruction appears once in this bundle.
4695  DenseMap<Value *, unsigned> UniquePositions;
4696  for (Value *V : VL) {
4697  if (isConstant(V)) {
4698  ReuseShuffleIndicies.emplace_back(
4699  isa<UndefValue>(V) ? UndefMaskElem : UniqueValues.size());
4700  UniqueValues.emplace_back(V);
4701  continue;
4702  }
4703  auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
4704  ReuseShuffleIndicies.emplace_back(Res.first->second);
4705  if (Res.second)
4706  UniqueValues.emplace_back(V);
4707  }
4708  size_t NumUniqueScalarValues = UniqueValues.size();
4709  if (NumUniqueScalarValues == VL.size()) {
4710  ReuseShuffleIndicies.clear();
4711  } else {
4712  LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
4713  if (NumUniqueScalarValues <= 1 ||
4714  (UniquePositions.size() == 1 && all_of(UniqueValues,
4715  [](Value *V) {
4716  return isa<UndefValue>(V) ||
4717  !isConstant(V);
4718  })) ||
4719  !llvm::isPowerOf2_32(NumUniqueScalarValues)) {
4720  LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
4721  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4722  return false;
4723  }
4724  VL = UniqueValues;
4725  }
4726  return true;
4727  };
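  // For instance, a hypothetical VL = {A, B, A, B} shrinks to UniqueValues =
  // {A, B} with ReuseShuffleIndicies = {0, 1, 0, 1}: the two unique lanes are
  // vectorized once and reshuffled back to four lanes. VL = {A, B, A, C}
  // would leave 3 unique scalars, not a power of 2, so it is gathered
  // instead.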
4728 
4729  InstructionsState S = getSameOpcode(VL);
4730 
4731  // Gather if we hit the RecursionMaxDepth, unless this is a load (or z/sext of
4732  // a load), in which case peek through to include it in the tree, without
4733  // ballooning over-budget.
4734  if (Depth >= RecursionMaxDepth &&
4735  !(S.MainOp && isa<Instruction>(S.MainOp) && S.MainOp == S.AltOp &&
4736  VL.size() >= 4 &&
4737  (match(S.MainOp, m_Load(m_Value())) || all_of(VL, [&S](const Value *I) {
4738  return match(I,
4739  m_OneUse(m_ZExtOrSExt(m_OneUse(m_Load(m_Value()))))) &&
4740  cast<Instruction>(I)->getOpcode() ==
4741  cast<Instruction>(S.MainOp)->getOpcode();
4742  })))) {
4743  LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
4744  if (TryToFindDuplicates(S))
4745  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4746  ReuseShuffleIndicies);
4747  return;
4748  }
4749 
4750  // Don't handle scalable vectors
4751  if (S.getOpcode() == Instruction::ExtractElement &&
4752  isa<ScalableVectorType>(
4753  cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) {
4754  LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n");
4755  if (TryToFindDuplicates(S))
4756  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4757  ReuseShuffleIndicies);
4758  return;
4759  }
4760 
4761  // Don't handle vectors.
4762  if (S.OpValue->getType()->isVectorTy() &&
4763  !isa<InsertElementInst>(S.OpValue)) {
4764  LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
4765  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4766  return;
4767  }
4768 
4769  if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
4770  if (SI->getValueOperand()->getType()->isVectorTy()) {
4771  LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
4772  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4773  return;
4774  }
4775 
4776  // If all of the operands are identical or constant, we have a simple
4777  // solution. If we deal with insert/extract instructions, they all must
4778  // have constant indices; otherwise we should gather them, not try to
4779  // vectorize. If this is an alternate-op node with 2 elements and gathered
4780  // operands, do not vectorize.
4781  auto &&NotProfitableForVectorization = [&S, this,
4782  Depth](ArrayRef<Value *> VL) {
4783  if (!S.getOpcode() || !S.isAltShuffle() || VL.size() > 2)
4784  return false;
4785  if (VectorizableTree.size() < MinTreeSize)
4786  return false;
4787  if (Depth >= RecursionMaxDepth - 1)
4788  return true;
4789  // Check if all operands are extracts, are part of a vector node, or can
4790  // build a regular vectorizable node.
4791  SmallVector<unsigned, 2> InstsCount(VL.size(), 0);
4792  for (Value *V : VL) {
4793  auto *I = cast<Instruction>(V);
4794  InstsCount.push_back(count_if(I->operand_values(), [](Value *Op) {
4795  return isa<Instruction>(Op) || isVectorLikeInstWithConstOps(Op);
4796  }));
4797  }
4798  bool IsCommutative = isCommutative(S.MainOp) || isCommutative(S.AltOp);
4799  if ((IsCommutative &&
4800  std::accumulate(InstsCount.begin(), InstsCount.end(), 0) < 2) ||
4801  (!IsCommutative &&
4802  all_of(InstsCount, [](unsigned ICnt) { return ICnt < 2; })))
4803  return true;
4804  assert(VL.size() == 2 && "Expected only 2 alternate op instructions.");
4805  SmallVector<SmallVector<std::pair<Value *, Value *>>> Candidates;
4806  auto *I1 = cast<Instruction>(VL.front());
4807  auto *I2 = cast<Instruction>(VL.back());
4808  for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op)
4809  Candidates.emplace_back().emplace_back(I1->getOperand(Op),
4810  I2->getOperand(Op));
4811  if (static_cast<unsigned>(count_if(
4812  Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) {
4813  return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat);
4814  })) >= S.MainOp->getNumOperands() / 2)
4815  return false;
4816  if (S.MainOp->getNumOperands() > 2)
4817  return true;
4818  if (IsCommutative) {
4819  // Check permuted operands.
4820  Candidates.clear();
4821  for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op)
4822  Candidates.emplace_back().emplace_back(I1->getOperand(Op),
4823  I2->getOperand((Op + 1) % E));
4824  if (any_of(
4825  Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) {
4826  return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat);
4827  }))
4828  return false;
4829  }
4830  return true;
4831  };
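  // In short: a 2-wide alternate-opcode node is only kept when enough of its
  // operand pairs (possibly after commuting) score at least as well as a
  // splat in the look-ahead heuristic, i.e. when it promises a profitable
  // sub-tree.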
4832  SmallVector<unsigned> SortedIndices;
4833  BasicBlock *BB = nullptr;
4834  bool IsScatterVectorizeUserTE =
4835  UserTreeIdx.UserTE &&
4836  UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize;
4837  bool AreAllSameInsts =
4838  (S.getOpcode() && allSameBlock(VL)) ||
4839  (S.OpValue->getType()->isPointerTy() && IsScatterVectorizeUserTE &&
4840  VL.size() > 2 &&
4841  all_of(VL,
4842  [&BB](Value *V) {
4843  auto *I = dyn_cast<GetElementPtrInst>(V);
4844  if (!I)
4845  return doesNotNeedToBeScheduled(V);
4846  if (!BB)
4847  BB = I->getParent();
4848  return BB == I->getParent() && I->getNumOperands() == 2;
4849  }) &&
4850  BB &&
4851  sortPtrAccesses(VL, UserTreeIdx.UserTE->getMainOp()->getType(), *DL, *SE,
4852  SortedIndices));
4853  if (allConstant(VL) || isSplat(VL) || !AreAllSameInsts ||
4854  (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>(
4855  S.OpValue) &&
4856  !all_of(VL, isVectorLikeInstWithConstOps)) ||
4857  NotProfitableForVectorization(VL)) {
4858  LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O, small shuffle. \n");
4859  if (TryToFindDuplicates(S))
4860  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4861  ReuseShuffleIndicies);
4862  return;
4863  }
4864 
4865  // We now know that this is a vector of instructions of the same type from
4866  // the same block.
4867 
4868  // Don't vectorize ephemeral values.
4869  if (!EphValues.empty()) {
4870  for (Value *V : VL) {
4871  if (EphValues.count(V)) {
4872  LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
4873  << ") is ephemeral.\n");
4874  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4875  return;
4876  }
4877  }
4878  }
4879 
4880  // Check if this is a duplicate of another entry.
4881  if (TreeEntry *E = getTreeEntry(S.OpValue)) {
4882  LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
4883  if (!E->isSame(VL)) {
4884  LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
4885  if (TryToFindDuplicates(S))
4886  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4887  ReuseShuffleIndicies);
4888  return;
4889  }
4890  // Record the reuse of the tree node. FIXME: currently this is only used
4891  // to properly draw the graph rather than for the actual vectorization.
4892  E->UserTreeIndices.push_back(UserTreeIdx);
4893  LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue
4894  << ".\n");
4895  return;
4896  }
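  // A "perfect diamond" means the same bundle is reached again through a
  // second user edge; the existing entry is reused instead of building a
  // duplicate subtree.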
4897 
4898  // Check that none of the instructions in the bundle are already in the tree.
4899  for (Value *V : VL) {
4900  if (!IsScatterVectorizeUserTE && !isa<Instruction>(V))
4901  continue;
4902  if (getTreeEntry(V)) {
4903  LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
4904  << ") is already in tree.\n");
4905  if (TryToFindDuplicates(S))
4906  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4907  ReuseShuffleIndicies);
4908  return;
4909  }
4910  }
4911 
4912  // The reduction nodes (stored in UserIgnoreList) should also stay scalar.
4913  if (UserIgnoreList && !UserIgnoreList->empty()) {
4914  for (Value *V : VL) {
4915  if (UserIgnoreList && UserIgnoreList->contains(V)) {
4916  LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
4917  if (TryToFindDuplicates(S))
4918  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4919  ReuseShuffleIndicies);
4920  return;
4921  }
4922  }
4923  }
4924 
4925  // Special processing for sorted pointers for ScatterVectorize node with
4926  // constant indices only.
4927  if (AreAllSameInsts && !(S.getOpcode() && allSameBlock(VL)) &&
4928  UserTreeIdx.UserTE &&
4929  UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize) {
4930  assert(S.OpValue->getType()->isPointerTy() &&
4931  count_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }) >=
4932  2 &&
4933  "Expected pointers only.");
4934  // Reset S to make it GetElementPtr kind of node.
4935  const auto *It = find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); });
4936  assert(It != VL.end() && "Expected at least one GEP.");
4937  S = getSameOpcode(*It);
4938  }
4939 
4940  // Check that all of the users of the scalars that we want to vectorize are
4941  // schedulable.
4942  auto *VL0 = cast<Instruction>(S.OpValue);
4943  BB = VL0->getParent();
4944 
4945  if (!DT->isReachableFromEntry(BB)) {
4946  // Don't go into unreachable blocks. They may contain instructions with
4947  // dependency cycles which confuse the final scheduling.
4948  LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
4949  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4950  return;
4951  }
4952 
4953  // Don't go into catchswitch blocks, which can happen with PHIs.
4954  // Such blocks can only have PHIs and the catchswitch. There is no
4955  // place to insert a shuffle if we need to, so just avoid that issue.
4956  if (isa<CatchSwitchInst>(BB->getTerminator())) {
4957  LLVM_DEBUG(dbgs() << "SLP: bundle in catchswitch block.\n");
4958  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4959  return;
4960  }
4961 
4962  // Check that every instruction appears once in this bundle.
4963  if (!TryToFindDuplicates(S))
4964  return;
4965 
4966  auto &BSRef = BlocksSchedules[BB];
4967  if (!BSRef)
4968  BSRef = std::make_unique<BlockScheduling>(BB);
4969 
4970  BlockScheduling &BS = *BSRef;
4971 
4972  Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
4973 #ifdef EXPENSIVE_CHECKS
4974  // Make sure we didn't break any internal invariants
4975  BS.verify();
4976 #endif
4977  if (!Bundle) {
4978  LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
4979  assert((!BS.getScheduleData(VL0) ||
4980  !BS.getScheduleData(VL0)->isPartOfBundle()) &&
4981  "tryScheduleBundle should cancelScheduling on failure");
4982  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4983  ReuseShuffleIndicies);
4984  return;
4985  }
4986  LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
4987 
4988  unsigned ShuffleOrOp = S.isAltShuffle() ?
4989  (unsigned) Instruction::ShuffleVector : S.getOpcode();
4990  switch (ShuffleOrOp) {
4991  case Instruction::PHI: {
4992  auto *PH = cast<PHINode>(VL0);
4993 
4994  // Check for terminator values (e.g. invoke).
4995  for (Value *V : VL)
4996  for (Value *Incoming : cast<PHINode>(V)->incoming_values()) {
4997  Instruction *Term = dyn_cast<Instruction>(Incoming);
4998  if (Term && Term->isTerminator()) {
4999  LLVM_DEBUG(dbgs()
5000  << "SLP: Need to swizzle PHINodes (terminator use).\n");
5001  BS.cancelScheduling(VL, VL0);
5002  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5003  ReuseShuffleIndicies);
5004  return;
5005  }
5006  }
5007 
5008  TreeEntry *TE =
5009  newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies);
5010  LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
5011 
5012  // Keeps the reordered operands to avoid code duplication.
5013  SmallVector<ValueList, 2> OperandsVec;
5014  for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
5015  if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) {
5016  ValueList Operands(VL.size(), PoisonValue::get(PH->getType()));
5017  TE->setOperand(I, Operands);
5018  OperandsVec.push_back(Operands);
5019  continue;
5020  }
5021  ValueList Operands;
5022  // Prepare the operand vector.
5023  for (Value *V : VL)
5024  Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(
5025  PH->getIncomingBlock(I)));
5026  TE->setOperand(I, Operands);
5027  OperandsVec.push_back(Operands);
5028  }
5029  for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx)
5030  buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx});
5031  return;
5032  }
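  // In other words, operand I of the vector PHI collects, from every scalar
  // PHI in the bundle, the incoming value for PH's I-th incoming block;
  // unreachable incoming blocks contribute poison lanes instead.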
5033  case Instruction::ExtractValue:
5034  case Instruction::ExtractElement: {
5035  OrdersType CurrentOrder;
5036  bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
5037  if (Reuse) {
5038  LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
5039  newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5040  ReuseShuffleIndicies);
5041  // This is a special case, as it does not gather, but at the same time
5042  // we are not extending buildTree_rec() towards the operands.
5043  ValueList Op0;
5044  Op0.assign(VL.size(), VL0->getOperand(0));
5045  VectorizableTree.back()->setOperand(0, Op0);
5046  return;
5047  }
5048  if (!CurrentOrder.empty()) {
5049  LLVM_DEBUG({
5050  dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
5051  "with order";
5052  for (unsigned Idx : CurrentOrder)
5053  dbgs() << " " << Idx;
5054  dbgs() << "\n";
5055  });
5056  fixupOrderingIndices(CurrentOrder);
5057  // Insert new order with initial value 0, if it does not exist,
5058  // otherwise return the iterator to the existing one.
5059  newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5060  ReuseShuffleIndicies, CurrentOrder);
5061  // This is a special case, as it does not gather, but at the same time
5062  // we are not extending buildTree_rec() towards the operands.
5063  ValueList Op0;
5064  Op0.assign(VL.size(), VL0->getOperand(0));
5065  VectorizableTree.back()->setOperand(0, Op0);
5066  return;
5067  }
5068  LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
5069  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5070  ReuseShuffleIndicies);
5071  BS.cancelScheduling(VL, VL0);
5072  return;
5073  }
5074  case Instruction::InsertElement: {
5075  assert(ReuseShuffleIndicies.empty() && "All inserts should be unique");
5076 
5077  // Check that we have a buildvector and not a shuffle of 2 or more
5078  // different vectors.
5079  ValueSet SourceVectors;
5080  for (Value *V : VL) {
5081  SourceVectors.insert(cast<Instruction>(V)->getOperand(0));
5082  assert(getInsertIndex(V) != None && "Non-constant or undef index?");
5083  }
5084 
5085  if (count_if(VL, [&SourceVectors](Value *V) {
5086  return !SourceVectors.contains(V);
5087  }) >= 2) {
5088  // Found 2nd source vector - cancel.
5089  LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
5090  "different source vectors.\n");
5091  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
5092  BS.cancelScheduling(VL, VL0);
5093  return;
5094  }
5095 
5096  auto OrdCompare = [](const std::pair<int, int> &P1,
5097  const std::pair<int, int> &P2) {
5098  return P1.first > P2.first;
5099  };
5100  PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>,
5101  decltype(OrdCompare)>
5102  Indices(OrdCompare);
5103  for (int I = 0, E = VL.size(); I < E; ++I) {
5104  unsigned Idx = *getInsertIndex(VL[I]);
5105  Indices.emplace(Idx, I);
5106  }
5107  OrdersType CurrentOrder(VL.size(), VL.size());
5108  bool IsIdentity = true;
5109  for (int I = 0, E = VL.size(); I < E; ++I) {
5110  CurrentOrder[Indices.top().second] = I;
5111  IsIdentity &= Indices.top().second == I;
5112  Indices.pop();
5113  }
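  // For example, if the four inserts in VL write buildvector lanes
  // {2, 0, 3, 1}, the min-heap pops VL positions 1, 3, 0, 2 and the loop
  // sets CurrentOrder = {2, 0, 3, 1}, i.e. each lane's rank by insert index;
  // an identity order is canonicalized to an empty order below.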
5114  if (IsIdentity)
5115  CurrentOrder.clear();
5116  TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5117  None, CurrentOrder);
5118  LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");
5119 
5120  constexpr int NumOps = 2;
5121  ValueList VectorOperands[NumOps];
5122  for (int I = 0; I < NumOps; ++I) {
5123  for (Value *V : VL)
5124  VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I));
5125 
5126  TE->setOperand(I, VectorOperands[I]);
5127  }
5128  buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1});
5129  return;
5130  }
5131  case Instruction::Load: {
5132  // Check that a vectorized load would load the same memory as a scalar
5133  // load. For example, we don't want to vectorize loads that are smaller
5134  // than 8 bits. Even though we have a packed struct {<i2, i2, i2, i2>},
5135  // LLVM treats loading/storing it as an i8 struct. If we vectorize
5136  // loads/stores from such a struct, we read/write packed bits that
5137  // disagree with the unvectorized version.
5138  SmallVector<Value *> PointerOps;
5139  OrdersType CurrentOrder;
5140  TreeEntry *TE = nullptr;
5141  switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, *LI, CurrentOrder,
5142  PointerOps)) {
5143  case LoadsState::Vectorize:
5144  if (CurrentOrder.empty()) {
5145  // Original loads are consecutive and do not require reordering.
5146  TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5147  ReuseShuffleIndicies);
5148  LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
5149  } else {
5150  fixupOrderingIndices(CurrentOrder);
5151  // Need to reorder.
5152  TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5153  ReuseShuffleIndicies, CurrentOrder);
5154  LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
5155  }
5156  TE->setOperandsInOrder();
5157  break;
5158  case LoadsState::ScatterVectorize:
5159  // Vectorizing non-consecutive loads with `llvm.masked.gather`.
5160  TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S,
5161  UserTreeIdx, ReuseShuffleIndicies);
5162  TE->setOperandsInOrder();
5163  buildTree_rec(PointerOps, Depth + 1, {TE, 0});
5164  LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n");
5165  break;
5166  case LoadsState::Gather:
5167  BS.cancelScheduling(VL, VL0);
5168  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5169  ReuseShuffleIndicies);
5170 #ifndef NDEBUG
5171  Type *ScalarTy = VL0->getType();
5172  if (DL->getTypeSizeInBits(ScalarTy) !=
5173  DL->getTypeAllocSizeInBits(ScalarTy))
5174  LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
5175  else if (any_of(VL, [](Value *V) {
5176  return !cast<LoadInst>(V)->isSimple();
5177  }))
5178  LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
5179  else
5180  LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
5181 #endif // NDEBUG
5182  break;
5183  }
5184  return;
5185  }
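  // In summary, loads take one of three routes: consecutive (possibly
  // jumbled) loads become one wide load plus an optional reshuffle,
  // non-consecutive but analyzable addresses become a masked-gather node
  // whose pointer operands are built recursively, and anything else is
  // gathered as scalars.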
5186  case Instruction::ZExt:
5187  case Instruction::SExt:
5188  case Instruction::FPToUI:
5189  case Instruction::FPToSI:
5190  case Instruction::FPExt:
5191  case Instruction::PtrToInt:
5192  case Instruction::IntToPtr:
5193  case Instruction::SIToFP:
5194  case Instruction::UIToFP:
5195  case Instruction::Trunc:
5196  case Instruction::FPTrunc:
5197  case Instruction::BitCast: {
5198  Type *SrcTy = VL0->getOperand(0)->getType();
5199  for (Value *V : VL) {
5200  Type *Ty = cast<Instruction>(V)->getOperand(0)->getType();
5201  if (Ty != SrcTy || !isValidElementType(Ty)) {
5202  BS.cancelScheduling(VL, VL0);
5203  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5204  ReuseShuffleIndicies);
5205  LLVM_DEBUG(dbgs()
5206  << "SLP: Gathering casts with different src types.\n");
5207  return;
5208  }
5209  }
5210  TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5211  ReuseShuffleIndicies);
5212  LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
5213 
5214  TE->setOperandsInOrder();
5215  for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
5216  ValueList Operands;
5217  // Prepare the operand vector.
5218  for (Value *V : VL)
5219  Operands.push_back(cast<Instruction>(V)->getOperand(i));
5220 
5221  buildTree_rec(Operands, Depth + 1, {TE, i});
5222  }
5223  return;
5224  }
5225  case Instruction::ICmp:
5226  case Instruction::FCmp: {
5227  // Check that all of the compares have the same predicate.
5228  CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
5229  CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
5230  Type *ComparedTy = VL0->getOperand(0)->getType();
5231  for (Value *V : VL) {
5232  CmpInst *Cmp = cast<CmpInst>(V);
5233  if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
5234  Cmp->getOperand(0)->getType() != ComparedTy) {
5235  BS.cancelScheduling(VL, VL0);
5236  newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5237  ReuseShuffleIndicies);
5238  LLVM_DEBUG(dbgs()
5239  << "SLP: Gathering cmp with different predicate.\n");
5240  return;
5241  }
5242  }
5243 
5244  TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5245  ReuseShuffleIndicies);
5246  LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");
5247 
5248  ValueList Left, Right;
5249  if (cast<CmpInst>(VL0)->isCommutative()) {
5250  // Commutative predicate - collect + sort operands of the instructions
5251  // so that each side is more likely to have the same opcode.
5252  assert(P0 == SwapP0 && "Commutative Predicate mismatch");
5253  reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
5254  } else {
5255  // Collect operands - commute if it uses the swapped predicate.
5256  for (Value *V : VL) {
5257  auto *Cmp = cast<CmpInst>(V);
5258  Value *LHS = Cmp->getOperand(0);
5259  Value *RHS = Cmp->getOperand(1);
5260  if (Cmp->getPredicate() != P0)
5261  std::swap(LHS, RHS);
5262  Left.push_back(LHS);
5263  Right.push_back(RHS);
5264  }
5265  }
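  // E.g., with P0 == sgt, a bundle member 'icmp slt %x, %y' contributes
  // (%y, %x) to (Left, Right), so every lane is expressed with the same sgt
  // predicate (%x and %y are placeholder names).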
5266  TE->setOperand(0, Left);
5267  TE->setOperand(1, Right);
5268  buildTree_rec(Left, Depth + 1, {TE, 0});
5269  buildTree_rec(Right, Depth + 1, {TE, 1});
5270  return;
5271  }
5272  case Instruction::Select:
5273  case Instruction::FNeg:
5274  case Instruction::Add:
5275  case Instruction::FAdd:
5276  case Instruction::Sub:
5277  case Instruction::FSub:
5278  case Instruction::Mul:
5279  case Instruction::FMul:
5280  case Instruction::UDiv:
5281  case Instruction::SDiv:
5282  case Instruction::FDiv:
5283  case Instruction::URem:
5284  case Instruction::SRem:
5285  case Instruction::FRem:
5286  case Instruction::Shl:
5287  case Instruction::LShr:
5288  case Instruction::AShr:
5289  case Instruction::And:
5290  case Instruction::Or:
5291  case Instruction::Xor: {
5292  TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5293  ReuseShuffleIndicies);
5294  LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");
5295 
5296  // Sort operands of the instructions so that each side is more likely to
5297  // have the same opcode.
5298  if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
5299  ValueList Left, Right;
5300  reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
5301  TE->setOperand(0, Left);
5302  TE->setOperand(1, Right);
5303  buildTree_rec(Left, Depth + 1, {TE, 0});
5304  buildTree_rec(Right, Depth + 1, {TE, 1});
5305  return;
5306  }
5307 
5308  TE->setOperandsInOrder();
5309  for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
5310  ValueList Operands;
5311  // Prepare the operand vector.
5312  for (Value *V : VL)
5313  Operands.push_back(cast<Instruction>(V)->getOperand(i));
5314 
5315  buildTree_rec(Operands, Depth + 1, {TE, i});
5316  }
5317  return;
5318  }
5319  case Instruction::GetElementPtr: {